===================================================================================================================================
SOURCE CODE FILE: XPUCachingAllocator.h
SIZE: 0.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\c10\xpu\XPUCachingAllocator.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/CachingDeviceAllocator.h>
#include <c10/xpu/XPUStream.h>
namespace c10::xpu::XPUCachingAllocator {
C10_XPU_API Allocator* get();
C10_XPU_API void init(DeviceIndex device_count);
C10_XPU_API void emptyCache();
C10_XPU_API void resetPeakStats(DeviceIndex device);
C10_XPU_API void resetAccumulatedStats(DeviceIndex device);
C10_XPU_API c10::CachingDeviceAllocator::DeviceStats getDeviceStats(
DeviceIndex device);
C10_XPU_API void* raw_alloc(size_t size);
C10_XPU_API void raw_delete(void* ptr);
C10_XPU_API void recordStream(const DataPtr& dataPtr, XPUStream stream);
} // namespace c10::xpu::XPUCachingAllocator
```
=============================================================================================================================
SOURCE CODE FILE: XPUDeviceProp.h
SIZE: 12.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\c10\xpu\XPUDeviceProp.h
ENCODING: utf-8
```h
#pragma once
#include <c10/xpu/XPUMacros.h>
#include <sycl/sycl.hpp>
namespace c10::xpu {
#define AT_FORALL_XPU_DEVICE_PROPERTIES(_) \
/* the device name of this SYCL device. */ \
_(name) \
\
/* the device type associated with the device. */ \
_(device_type) \
\
/* the vendor of this SYCL device. */ \
_(vendor) \
\
/* a backend-defined driver version as a std::string. */ \
_(driver_version) \
\
/* the SYCL version as a std::string in the form <major>.<minor> */ \
_(version) \
\
/* true if the SYCL device is available. Otherwise, return false. */ \
_(is_available) \
\
/* the maximum size in bytes of the arguments that can be passed to a \
* kernel. */ \
_(max_parameter_size) \
\
/* the number of parallel compute units available to the device. */ \
_(max_compute_units) \
\
/* the maximum dimensions that specify the global and local work-item IDs \
* used by the data parallel execution model. */ \
_(max_work_item_dimensions) \
\
  /* the maximum number of work-items that are permitted in a work-group \
   * executing a kernel on a single compute unit. */ \
_(max_work_group_size) \
\
/* the maximum number of subgroups in a work-group for any kernel executed \
* on the device. */ \
_(max_num_sub_groups) \
\
/* a std::vector of size_t containing the set of sub-group sizes supported \
* by the device. */ \
_(sub_group_sizes) \
\
/* the maximum configured clock frequency of this SYCL device in MHz. */ \
_(max_clock_frequency) \
\
/* the default compute device address space size specified as an unsigned \
* integer value in bits. Must return either 32 or 64. */ \
_(address_bits) \
\
/* the maximum size of memory object allocation in bytes. */ \
_(max_mem_alloc_size) \
\
/* the minimum value in bits of the largest supported SYCL built-in data \
* type if this SYCL device is not of device type \
* sycl::info::device_type::custom. */ \
_(mem_base_addr_align) \
\
/* a std::vector of info::fp_config describing the half/single/double \
* precision floating-point capability of this SYCL device. */ \
_(half_fp_config) \
_(single_fp_config) \
_(double_fp_config) \
\
/* the size of global device memory in bytes. */ \
_(global_mem_size) \
\
/* the type of global memory cache supported. */ \
_(global_mem_cache_type) \
\
/* the size of global memory cache in bytes. */ \
_(global_mem_cache_size) \
\
/* the size of global memory cache line in bytes. */ \
_(global_mem_cache_line_size) \
\
/* the type of local memory supported. */ \
_(local_mem_type) \
\
/* the size of local memory arena in bytes. */ \
_(local_mem_size) \
\
/* the maximum number of sub-devices that can be created when this device is \
* partitioned. */ \
_(partition_max_sub_devices) \
\
/* the resolution of device timer in nanoseconds. */ \
_(profiling_timer_resolution) \
\
/* the preferred native vector width size for built-in scalar types that can \
* be put into vectors. */ \
_(preferred_vector_width_char) \
_(preferred_vector_width_short) \
_(preferred_vector_width_int) \
_(preferred_vector_width_long) \
_(preferred_vector_width_float) \
_(preferred_vector_width_double) \
_(preferred_vector_width_half) \
\
/* the native ISA vector width. The vector width is defined as the number of \
* scalar elements that can be stored in the vector. */ \
_(native_vector_width_char) \
_(native_vector_width_short) \
_(native_vector_width_int) \
_(native_vector_width_long) \
_(native_vector_width_float) \
_(native_vector_width_double) \
_(native_vector_width_half)
#define AT_FORALL_XPU_EXT_DEVICE_PROPERTIES(_) \
/* the number of EUs associated with the Intel GPU. */ \
_(gpu_eu_count, 512) \
\
/* the number of EUs in a subslice. */ \
_(gpu_eu_count_per_subslice, 8) \
\
/* the simd width of EU of GPU. */ \
_(gpu_eu_simd_width, 8) \
\
/* the number of hardware threads per EU of GPU. */ \
_(gpu_hw_threads_per_eu, 8)
#define AT_FORALL_XPU_DEVICE_ASPECT(_) \
/* sycl::half is supported on device. */ \
_(fp16) \
\
/* double is supported on device. */ \
_(fp64) \
\
/* 64-bit atomic operation is supported on device. */ \
_(atomic64)
#define AT_FORALL_XPU_EXP_CL_ASPECT(_) \
/* conversion between single-precision 32-bit floating-point values and \
* 16-bit bfloat16 values is supported on device. */ \
_(bfloat16_conversions) \
\
/* specialized hardware to compute MMA is supported on device. */ \
_(subgroup_matrix_multiply_accumulate) \
\
/* specialized hardware to compute MMA for 32-bit floating-point is \
* supported on device. */ \
_(subgroup_matrix_multiply_accumulate_tensor_float32) \
\
  /* block read operations for efficient matrix multiplication are supported \
   * on device. */ \
_(subgroup_2d_block_io)
#define AT_FORALL_XPU_EXP_DEVICE_PROPERTIES(_) \
/* the device architecture of this SYCL device. */ \
_(architecture)
#define _DEFINE_SYCL_PROP(ns, property, member) \
ns::property::return_type member;
#define DEFINE_DEVICE_PROP(property) \
_DEFINE_SYCL_PROP(sycl::info::device, property, property)
#define DEFINE_PLATFORM_PROP(property, member) \
_DEFINE_SYCL_PROP(sycl::info::platform, property, member)
#define DEFINE_EXT_DEVICE_PROP(property, ...) \
_DEFINE_SYCL_PROP(sycl::ext::intel::info::device, property, property)
#define DEFINE_DEVICE_ASPECT(member) bool has_##member;
#define DEFINE_EXP_DEVICE_PROP(property) \
_DEFINE_SYCL_PROP( \
sycl::ext::oneapi::experimental::info::device, property, property)
struct C10_XPU_API DeviceProp {
AT_FORALL_XPU_DEVICE_PROPERTIES(DEFINE_DEVICE_PROP);
// the platform name.
DEFINE_PLATFORM_PROP(name, platform_name);
AT_FORALL_XPU_EXT_DEVICE_PROPERTIES(DEFINE_EXT_DEVICE_PROP);
AT_FORALL_XPU_DEVICE_ASPECT(DEFINE_DEVICE_ASPECT);
AT_FORALL_XPU_EXP_CL_ASPECT(DEFINE_DEVICE_ASPECT);
#if SYCL_COMPILER_VERSION >= 20250000
AT_FORALL_XPU_EXP_DEVICE_PROPERTIES(DEFINE_EXP_DEVICE_PROP);
#endif
};
#undef _DEFINE_SYCL_PROP
#undef DEFINE_DEVICE_PROP
#undef DEFINE_PLATFORM_PROP
#undef DEFINE_EXT_DEVICE_PROP
#undef DEFINE_DEVICE_ASPECT
#undef DEFINE_EXP_DEVICE_PROP
} // namespace c10::xpu
```
============================================================================================================================
SOURCE CODE FILE: XPUException.h
SIZE: 0.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\c10\xpu\XPUException.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/Exception.h>
#include <sycl/sycl.hpp>
namespace c10::xpu {
static inline sycl::async_handler asyncHandler = [](sycl::exception_list el) {
if (el.size() == 0) {
return;
}
for (const auto& e : el) {
try {
std::rethrow_exception(e);
} catch (sycl::exception& e) {
TORCH_WARN("SYCL Exception: ", e.what());
}
}
throw;
};
} // namespace c10::xpu
```
============================================================================================================================
SOURCE CODE FILE: XPUFunctions.h
SIZE: 1.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\c10\xpu\XPUFunctions.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/Device.h>
#include <c10/xpu/XPUDeviceProp.h>
#include <c10/xpu/XPUMacros.h>
// The naming convention used here matches the naming convention of torch.xpu
namespace c10::xpu {
// Log a warning only once if no devices are detected.
C10_XPU_API DeviceIndex device_count();
// Throws an error if no devices are detected.
C10_XPU_API DeviceIndex device_count_ensure_non_zero();
C10_XPU_API DeviceIndex current_device();
C10_XPU_API void set_device(DeviceIndex device);
C10_XPU_API DeviceIndex exchange_device(DeviceIndex device);
C10_XPU_API DeviceIndex maybe_exchange_device(DeviceIndex to_device);
C10_XPU_API sycl::device& get_raw_device(DeviceIndex device);
C10_XPU_API sycl::context& get_device_context();
C10_XPU_API void get_device_properties(
DeviceProp* device_prop,
DeviceIndex device);
C10_XPU_API DeviceIndex get_device_idx_from_pointer(void* ptr);
static inline void check_device_index(DeviceIndex device_index) {
TORCH_CHECK(
device_index >= 0 && device_index < c10::xpu::device_count(),
"The device index is out of range. It must be in [0, ",
static_cast<int>(c10::xpu::device_count()),
"), but got ",
static_cast<int>(device_index),
".");
}
} // namespace c10::xpu
```
=========================================================================================================================
SOURCE CODE FILE: XPUMacros.h
SIZE: 0.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\c10\xpu\XPUMacros.h
ENCODING: utf-8
```h
#pragma once
#ifndef C10_USING_CUSTOM_GENERATED_MACROS
#include <c10/xpu/impl/xpu_cmake_macros.h>
#endif
// See c10/macros/Export.h for a detailed explanation of what the function
// of these macros are. We need one set of macros for every separate library
// we build.
#ifdef _WIN32
#if defined(C10_XPU_BUILD_SHARED_LIBS)
#define C10_XPU_EXPORT __declspec(dllexport)
#define C10_XPU_IMPORT __declspec(dllimport)
#else
#define C10_XPU_EXPORT
#define C10_XPU_IMPORT
#endif
#else // _WIN32
#if defined(__GNUC__)
#define C10_XPU_EXPORT __attribute__((__visibility__("default")))
#else // defined(__GNUC__)
#define C10_XPU_EXPORT
#endif // defined(__GNUC__)
#define C10_XPU_IMPORT C10_XPU_EXPORT
#endif // _WIN32
// This one is being used by libc10_xpu.so
#ifdef C10_XPU_BUILD_MAIN_LIB
#define C10_XPU_API C10_XPU_EXPORT
#else
#define C10_XPU_API C10_XPU_IMPORT
#endif
```
=========================================================================================================================
SOURCE CODE FILE: XPUStream.h
SIZE: 6.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\c10\xpu\XPUStream.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/Stream.h>
#include <c10/core/impl/GPUTrace.h>
#include <c10/xpu/XPUFunctions.h>
namespace c10::xpu {
/*
* Note [Stream Management]
*
 * An XPUStream is an abstraction of an actual SYCL queue in which SYCL
 * kernels can execute. Currently, there are several pools per device to
 * manage SYCL queues, and a device's pools are lazily created.
 *
 * There are two pools per device. The first pool contains "normal priority"
 * queues. The second pool is the "high priority" queues. There are 32 queues
 * per pool per device, and when a queue is requested one of these queues is
 * returned round-robin. That is, the first queue requested is at index 0, the
 * second at index 1... to index 31, then index 0 again.
*
* This means that if 33 queues are requested, the first and last queues
* requested are actually the same queue (under the covers) and kernels enqueued
* on them cannot run concurrently.
*
 * It is safe to enqueue a kernel on the same queue from two different
 * threads, as described by the SYCL specification.
*/
static constexpr int max_compile_time_stream_priorities = 3;
/*
* This serves as a wrapper around c10::Stream and acts as a representation for
* a SYCL queue, which allows asynchronous execution of XPU tasks.
*/
class C10_XPU_API XPUStream {
public:
enum Unchecked { UNCHECKED };
  /// Construct an XPUStream from a Stream. This construction is checked, and
  /// will raise an error if the Stream is not, in fact, an XPU stream.
explicit XPUStream(Stream stream) : stream_(stream) {
TORCH_CHECK(stream_.device_type() == DeviceType::XPU);
}
  /// Construct an XPUStream from a Stream with no error checking.
explicit XPUStream(Unchecked, Stream stream) : stream_(stream) {}
bool operator==(const XPUStream& other) const noexcept {
return unwrap() == other.unwrap();
}
bool operator!=(const XPUStream& other) const noexcept {
return unwrap() != other.unwrap();
}
/// Implicit conversion to sycl::queue&.
operator sycl::queue&() const {
return queue();
}
  /// Implicit conversion to Stream (i.e., forget that the stream is an
  /// XPU stream).
operator Stream() const {
return unwrap();
}
/// Get the XPU device type that this stream is associated with.
DeviceType device_type() const {
return DeviceType::XPU;
}
/// Get the XPU device index that this stream is associated with.
DeviceIndex device_index() const {
return stream_.device_index();
}
/// Get the full Device that this stream is associated with. The Device is
  /// guaranteed to be an XPU device.
Device device() const {
return Device(DeviceType::XPU, device_index());
}
/// Return the stream ID corresponding to this particular stream. StreamId is
  /// an int64_t representation generated by its type and index.
StreamId id() const {
return stream_.id();
}
/// Return true if all enqueued tasks in this stream have been completed,
/// otherwise return false.
bool query() const {
return queue().ext_oneapi_empty();
}
/// Performs a blocking wait for the completion of all enqueued tasks in this
/// stream.
void synchronize() const {
queue().wait_and_throw();
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_stream_synchronization(
c10::kXPU, reinterpret_cast<uintptr_t>(&queue()));
}
}
/// Return the priority that this stream is associated with. Lower numbers
/// represent higher priority.
int priority() const;
/// Explicit conversion to sycl::queue&.
sycl::queue& queue() const;
/// Explicit conversion to Stream.
Stream unwrap() const {
return stream_;
}
  /// Reversibly pack an XPUStream into a struct representation. The XPUStream
/// can be unpacked using unpack3().
struct c10::StreamData3 pack3() const {
return stream_.pack3();
}
  /// Unpack an XPUStream from the 3 fields generated by pack3().
static XPUStream unpack3(
StreamId stream_id,
DeviceIndex device_index,
DeviceType device_type) {
return XPUStream(Stream::unpack3(stream_id, device_index, device_type));
}
/// Return the range of priority **supported by PyTorch**.
static std::tuple<int, int> priority_range() {
// See Note [XPU Stream priorities]
return std::make_tuple(1, -max_compile_time_stream_priorities + 2);
}
private:
Stream stream_;
};
/**
* Get a stream from the pool in a round-robin fashion.
*
* You can request a stream from the highest priority pool by setting
* isHighPriority to true for a specific device.
*/
C10_XPU_API XPUStream
getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
/**
* Get a stream from the pool in a round-robin fashion.
*
* You can request a stream by setting a priority value for a specific device.
 * The lower the priority number, the higher the priority.
*/
C10_XPU_API XPUStream
getStreamFromPool(const int priority, DeviceIndex device = -1);
/**
* Get an XPUStream from an external SYCL queue.
*
* This function allows interoperability with other libraries by enabling
* the use of an external SYCL queue that was not created by PyTorch. This
* can be useful for data exchange or other operations where integration
* with non-PyTorch queues is required.
*
* NOTE: It is the user's responsibility to ensure that the referenced SYCL
* queue remains alive while the corresponding XPUStream, or any c10::Stream
 * derived from it, is in use. Different SYCL queue pointers will result in
 * distinct XPUStream instances, even if the SYCL queues they point to are
* equivalent.
*/
C10_XPU_API XPUStream
getStreamFromExternal(sycl::queue* ext_queue, DeviceIndex device_index);
/**
* Get the current XPU stream, for the passed XPU device, or for the current
* device if no device index is passed.
*/
C10_XPU_API XPUStream getCurrentXPUStream(DeviceIndex device = -1);
/**
* Set the current stream on the device of the passed in stream to be the passed
* in stream.
*/
C10_XPU_API void setCurrentXPUStream(XPUStream stream);
C10_XPU_API std::ostream& operator<<(std::ostream& stream, const XPUStream& s);
/**
* Block all reserved SYCL queues in the stream pools on the device, and wait
 * for them to complete.
*/
C10_XPU_API void syncStreamsOnDevice(DeviceIndex device = -1);
} // namespace c10::xpu
namespace std {
template <>
struct hash<c10::xpu::XPUStream> {
size_t operator()(c10::xpu::XPUStream s) const noexcept {
return std::hash<c10::Stream>{}(s.unwrap());
}
};
} // namespace std
```
=================================================================================================================================
SOURCE CODE FILE: XPUGuardImpl.h
SIZE: 6.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\c10\xpu\impl\XPUGuardImpl.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/DeviceGuard.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/core/impl/GPUTrace.h>
#include <c10/xpu/XPUCachingAllocator.h>
#include <c10/xpu/XPUFunctions.h>
#include <c10/xpu/XPUStream.h>
#include <vector>
namespace c10::xpu::impl {
struct XPUGuardImpl final : public c10::impl::DeviceGuardImplInterface {
static constexpr DeviceType static_type = kXPU;
XPUGuardImpl() = default;
explicit XPUGuardImpl(DeviceType t) {
TORCH_INTERNAL_ASSERT(t == kXPU);
}
DeviceType type() const override {
return kXPU;
}
Device exchangeDevice(Device d) const override {
TORCH_INTERNAL_ASSERT(d.is_xpu());
const auto old_device_index = c10::xpu::exchange_device(d.index());
return Device(kXPU, old_device_index);
}
Device getDevice() const override {
const auto device = c10::xpu::current_device();
return Device(kXPU, device);
}
void setDevice(Device d) const override {
TORCH_INTERNAL_ASSERT(d.is_xpu());
c10::xpu::set_device(d.index());
}
void uncheckedSetDevice(Device d) const noexcept override {
c10::xpu::set_device(d.index());
}
Stream getStream(Device d) const override {
return getCurrentXPUStream(d.index()).unwrap();
}
Stream getNewStream(Device d, int priority = 0) const override {
return getStreamFromPool(priority, d.index());
}
Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
const override {
return getStreamFromPool(isHighPriority, d.index());
}
// NB: These do NOT set the current device
Stream exchangeStream(Stream s) const override {
const XPUStream stream(s);
const auto old_stream = getCurrentXPUStream(s.device().index());
setCurrentXPUStream(stream);
return old_stream.unwrap();
}
DeviceIndex deviceCount() const noexcept override {
return c10::xpu::device_count();
}
// Event-related functions
void destroyEvent(void* event, const DeviceIndex device_index)
const noexcept override {
if (!event)
return;
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_deletion(
c10::kXPU, reinterpret_cast<uintptr_t>(event));
}
delete reinterpret_cast<sycl::event*>(event);
}
void record(
void** event,
const Stream& stream,
const DeviceIndex device_index,
const EventFlag flag) const override {
TORCH_CHECK(
device_index == -1 || device_index == stream.device_index(),
"Event device index ",
device_index,
" does not match recording stream's device index ",
stream.device_index(),
".");
auto* xpu_event = reinterpret_cast<sycl::event*>(*event);
const XPUStream xpu_stream{stream};
// Delete the event previously recorded.
if (xpu_event)
delete xpu_event;
#if SYCL_COMPILER_VERSION >= 20250000
if (flag == EventFlag::BACKEND_DEFAULT) {
// Use the profiling tag to record the event to enable timing feature.
xpu_event =
new sycl::event(sycl::ext::oneapi::experimental::submit_profiling_tag(
xpu_stream.queue()));
} else {
xpu_event =
new sycl::event(xpu_stream.queue().ext_oneapi_submit_barrier());
}
#else
xpu_event = new sycl::event(xpu_stream.queue().ext_oneapi_submit_barrier());
#endif
*event = reinterpret_cast<void*>(xpu_event);
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_record(
c10::kXPU,
reinterpret_cast<uintptr_t>(xpu_event),
reinterpret_cast<uintptr_t>(&xpu_stream.queue()));
}
}
void block(void* event, const Stream& stream) const override {
if (!event)
return;
auto* xpu_event = reinterpret_cast<sycl::event*>(event);
std::vector<sycl::event> event_list{*xpu_event};
const XPUStream xpu_stream(stream);
xpu_stream.queue().ext_oneapi_submit_barrier(event_list);
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_wait(
c10::kXPU,
reinterpret_cast<uintptr_t>(xpu_event),
reinterpret_cast<uintptr_t>(&xpu_stream.queue()));
}
}
bool queryEvent(void* event) const override {
using namespace sycl::info;
if (!event)
return true;
auto* xpu_event = reinterpret_cast<sycl::event*>(event);
return xpu_event->get_info<event::command_execution_status>() ==
event_command_status::complete;
}
double elapsedTime(
void* start_event,
void* end_event,
const DeviceIndex device_index) const override {
#if SYCL_COMPILER_VERSION < 20250000
TORCH_CHECK_NOT_IMPLEMENTED(
false,
"elapsedTime requires PyTorch to be built with SYCL compiler version 2025.0.0 or newer.");
#endif
TORCH_CHECK(
start_event && end_event,
"Both events must be recorded before calculating elapsed time.");
auto* xpu_start_event = reinterpret_cast<sycl::event*>(start_event);
auto* xpu_end_event = reinterpret_cast<sycl::event*>(end_event);
using namespace sycl::info::event_profiling;
// Block until both of the recorded events are completed.
uint64_t end_time_ns = xpu_end_event->get_profiling_info<command_end>();
uint64_t start_time_ns = xpu_start_event->get_profiling_info<command_end>();
  // Return the elapsed time in milliseconds.
return 1e-6 *
(static_cast<double>(end_time_ns) - static_cast<double>(start_time_ns));
}
// Stream-related functions
bool queryStream(const Stream& stream) const override {
const XPUStream xpu_stream{stream};
return xpu_stream.query();
}
void synchronizeStream(const Stream& stream) const override {
const XPUStream xpu_stream{stream};
xpu_stream.synchronize();
}
void synchronizeEvent(void* event) const override {
if (!event)
return;
auto* xpu_event = reinterpret_cast<sycl::event*>(event);
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_synchronization(
c10::kXPU, reinterpret_cast<uintptr_t>(xpu_event));
}
xpu_event->wait_and_throw();
}
void synchronizeDevice(const c10::DeviceIndex device_index) const override {
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_device_synchronization(c10::kXPU);
}
c10::xpu::syncStreamsOnDevice(device_index);
}
void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
const override {
const XPUStream xpu_stream{stream};
XPUCachingAllocator::recordStream(data_ptr, xpu_stream);
}
};
} // namespace c10::xpu::impl
```
================================================================================================================================
SOURCE CODE FILE: crc_alt.h
SIZE: 75.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\caffe2\serialize\crc_alt.h
ENCODING: utf-8
```h
#pragma once
// //////////////////////////////////////////////////////////
// Crc32.h
// Copyright (c) 2011-2019 Stephan Brumme. All rights reserved.
// Slicing-by-16 contributed by Bulat Ziganshin
// Tableless bytewise CRC contributed by Hagai Gold
// see http://create.stephan-brumme.com/disclaimer.html
//
// if running on an embedded system, you might consider shrinking the
// big Crc32Lookup table by undefining these lines:
#define CRC32_USE_LOOKUP_TABLE_BYTE
#define CRC32_USE_LOOKUP_TABLE_SLICING_BY_4
#define CRC32_USE_LOOKUP_TABLE_SLICING_BY_8
#define CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
// - crc32_bitwise doesn't need it at all
// - crc32_halfbyte has its own small lookup table
// - crc32_1byte_tableless and crc32_1byte_tableless2 don't need it at all
// - crc32_1byte needs only Crc32Lookup[0]
// - crc32_4bytes needs only Crc32Lookup[0..3]
// - crc32_8bytes needs only Crc32Lookup[0..7]
// - crc32_4x8bytes needs only Crc32Lookup[0..7]
// - crc32_16bytes needs all of Crc32Lookup
// using the aforementioned #defines, the table is automatically fitted to your needs
// uint8_t, uint32_t, int32_t
#include <stdint.h>
// size_t
#include <cstddef>
// crc32_fast selects the fastest algorithm depending on flags (CRC32_USE_LOOKUP_...)
/// compute CRC32 using the fastest algorithm for large datasets on modern CPUs
uint32_t crc32_fast (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// merge two CRC32 such that result = crc32(dataB, lengthB, crc32(dataA, lengthA))
uint32_t crc32_combine (uint32_t crcA, uint32_t crcB, size_t lengthB);
/// compute CRC32 (bitwise algorithm)
uint32_t crc32_bitwise (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// compute CRC32 (half-byte algorithm)
uint32_t crc32_halfbyte(const void* data, size_t length, uint32_t previousCrc32 = 0);
#ifdef CRC32_USE_LOOKUP_TABLE_BYTE
/// compute CRC32 (standard algorithm)
uint32_t crc32_1byte (const void* data, size_t length, uint32_t previousCrc32 = 0);
#endif
/// compute CRC32 (byte algorithm) without lookup tables
uint32_t crc32_1byte_tableless (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// compute CRC32 (byte algorithm) without lookup tables
uint32_t crc32_1byte_tableless2(const void* data, size_t length, uint32_t previousCrc32 = 0);
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_4
/// compute CRC32 (Slicing-by-4 algorithm)
uint32_t crc32_4bytes (const void* data, size_t length, uint32_t previousCrc32 = 0);
#endif
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_8
/// compute CRC32 (Slicing-by-8 algorithm)
uint32_t crc32_8bytes (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// compute CRC32 (Slicing-by-8 algorithm), unroll inner loop 4 times
uint32_t crc32_4x8bytes(const void* data, size_t length, uint32_t previousCrc32 = 0);
#endif
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
/// compute CRC32 (Slicing-by-16 algorithm)
uint32_t crc32_16bytes (const void* data, size_t length, uint32_t previousCrc32 = 0);
/// compute CRC32 (Slicing-by-16 algorithm, prefetch upcoming data blocks)
uint32_t crc32_16bytes_prefetch(const void* data, size_t length, uint32_t previousCrc32 = 0, size_t prefetchAhead = 256);
#endif
// //////////////////////////////////////////////////////////
// Crc32.cpp
// Copyright (c) 2011-2019 Stephan Brumme. All rights reserved.
// Slicing-by-16 contributed by Bulat Ziganshin
// Tableless bytewise CRC contributed by Hagai Gold
// see http://create.stephan-brumme.com/disclaimer.html
//
// if running on an embedded system, you might consider shrinking the
// big Crc32Lookup table:
// - crc32_bitwise doesn't need it at all
// - crc32_halfbyte has its own small lookup table
// - crc32_1byte needs only Crc32Lookup[0]
// - crc32_4bytes needs only Crc32Lookup[0..3]
// - crc32_8bytes needs only Crc32Lookup[0..7]
// - crc32_4x8bytes needs only Crc32Lookup[0..7]
// - crc32_16bytes needs all of Crc32Lookup
#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN 1234
#endif
#ifndef __BIG_ENDIAN
#define __BIG_ENDIAN 4321
#endif
// define endianness and some integer data types
#if defined(_MSC_VER) || defined(__MINGW32__)
// Windows always little endian
#define __BYTE_ORDER __LITTLE_ENDIAN
// intrinsics / prefetching
#if defined(_M_ARM64)
#include <intrin.h>
#else
#include <xmmintrin.h>
#endif
#ifdef __MINGW32__
#define PREFETCH(location) __builtin_prefetch(location)
#else
#if defined(_M_ARM64)
#define PREFETCH(location) __prefetch(location)
#else
#define PREFETCH(location) _mm_prefetch(location, _MM_HINT_T0)
#endif
#endif
#elif defined(__APPLE__)
#include <TargetConditionals.h>
#if TARGET_IPHONE_SIMULATOR
#define __BYTE_ORDER __LITTLE_ENDIAN
#elif TARGET_OS_IPHONE
#define __BYTE_ORDER __LITTLE_ENDIAN
#elif TARGET_OS_MAC
#include <machine/endian.h>
#if defined(__BIG_ENDIAN__)
#define __BYTE_ORDER __BIG_ENDIAN
#endif
#if defined(__LITTLE_ENDIAN__)
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif
#else
# error "Unknown Apple platform"
#endif
#elif defined(__ARMEB__)
#define __BYTE_ORDER __BIG_ENDIAN
#elif (defined(__BYTE_ORDER__) and !defined(__BYTE_ORDER))
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif
#else
// defines __BYTE_ORDER as __LITTLE_ENDIAN or __BIG_ENDIAN
#include <sys/param.h>
#endif
// intrinsics / prefetching
#ifdef __GNUC__
#define PREFETCH(location) __builtin_prefetch(location)
#else
#ifndef PREFETCH
// no prefetching
#define PREFETCH(location) ;
#endif
#endif
// abort if byte order is undefined
#ifndef __BYTE_ORDER
#error undefined byte order, compile with -D__BYTE_ORDER=1234 (if little endian) or -D__BYTE_ORDER=4321 (big endian)
#endif
namespace
{
/// zlib's CRC32 polynomial
const uint32_t Polynomial = 0xEDB88320;
  /// swap endianness
static inline uint32_t swap(uint32_t x)
{
#if defined(__GNUC__) || defined(__clang__)
return __builtin_bswap32(x);
#else
return (x >> 24) |
((x >> 8) & 0x0000FF00) |
((x << 8) & 0x00FF0000) |
(x << 24);
#endif
}
/// Slicing-By-16
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
const size_t MaxSlice = 16;
#elif defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_8)
const size_t MaxSlice = 8;
#elif defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_4)
const size_t MaxSlice = 4;
#elif defined(CRC32_USE_LOOKUP_TABLE_BYTE)
const size_t MaxSlice = 1;
#else
#define NO_LUT // don't need Crc32Lookup at all
#endif
} // anonymous namespace
#ifndef NO_LUT
/// forward declaration, table is at the end of this file
extern const uint32_t Crc32Lookup[MaxSlice][256]; // extern is needed to keep compiler happy
#endif
/// compute CRC32 (bitwise algorithm)
uint32_t crc32_bitwise(const void* data, size_t length, uint32_t previousCrc32)
{
uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF
const uint8_t* current = (const uint8_t*) data;
while (length-- != 0)
{
crc ^= *current++;
for (int j = 0; j < 8; j++)
{
// branch-free
crc = (crc >> 1) ^ (-int32_t(crc & 1) & Polynomial);
// branching, much slower:
//if (crc & 1)
// crc = (crc >> 1) ^ Polynomial;
//else
// crc = crc >> 1;
}
}
return ~crc; // same as crc ^ 0xFFFFFFFF
}
/// compute CRC32 (half-byte algorithm)
uint32_t crc32_halfbyte(const void* data, size_t length, uint32_t previousCrc32)
{
uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF
const uint8_t* current = (const uint8_t*) data;
  /// look-up table for half-byte, same as Crc32Lookup[0][16*i]
static const uint32_t Crc32Lookup16[16] =
{
0x00000000,0x1DB71064,0x3B6E20C8,0x26D930AC,0x76DC4190,0x6B6B51F4,0x4DB26158,0x5005713C,
0xEDB88320,0xF00F9344,0xD6D6A3E8,0xCB61B38C,0x9B64C2B0,0x86D3D2D4,0xA00AE278,0xBDBDF21C
};
while (length-- != 0)
{
crc = Crc32Lookup16[(crc ^ *current ) & 0x0F] ^ (crc >> 4);
crc = Crc32Lookup16[(crc ^ (*current >> 4)) & 0x0F] ^ (crc >> 4);
current++;
}
return ~crc; // same as crc ^ 0xFFFFFFFF
}
#ifdef CRC32_USE_LOOKUP_TABLE_BYTE
/// compute CRC32 (standard algorithm)
uint32_t crc32_1byte(const void* data, size_t length, uint32_t previousCrc32)
{
uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF
const uint8_t* current = (const uint8_t*) data;
while (length-- != 0)
crc = (crc >> 8) ^ Crc32Lookup[0][(crc & 0xFF) ^ *current++];
return ~crc; // same as crc ^ 0xFFFFFFFF
}
#endif
/// compute CRC32 (byte algorithm) without lookup tables
uint32_t crc32_1byte_tableless(const void* data, size_t length, uint32_t previousCrc32)
{
uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF
const uint8_t* current = (const uint8_t*) data;
while (length-- != 0)
{
uint8_t s = uint8_t(crc) ^ *current++;
    // Hagai Gold made me aware of this table-less algorithm and sent me code
// polynomial 0xEDB88320 can be written in binary as 11101101101110001000001100100000b
// reverse the bits (or just assume bit 0 is the first one)
// and we have bits set at position 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26
// => those are the shift offsets:
//crc = (crc >> 8) ^
// t ^
// (t >> 1) ^ (t >> 2) ^ (t >> 4) ^ (t >> 5) ^ // == y
// (t >> 7) ^ (t >> 8) ^ (t >> 10) ^ (t >> 11) ^ // == y >> 6
// (t >> 12) ^ (t >> 16) ^ // == z
// (t >> 22) ^ (t >> 26) ^ // == z >> 10
// (t >> 23);
// the fastest I can come up with:
uint32_t low = (s ^ (s << 6)) & 0xFF;
uint32_t a = (low * ((1 << 23) + (1 << 14) + (1 << 2)));
crc = (crc >> 8) ^
(low * ((1 << 24) + (1 << 16) + (1 << 8))) ^
a ^
(a >> 1) ^
(low * ((1 << 20) + (1 << 12) )) ^
(low << 19) ^
(low << 17) ^
(low >> 2);
// Hagai's code:
/*uint32_t t = (s ^ (s << 6)) << 24;
// some temporaries to optimize XOR
uint32_t x = (t >> 1) ^ (t >> 2);
uint32_t y = x ^ (x >> 3);
uint32_t z = (t >> 12) ^ (t >> 16);
crc = (crc >> 8) ^
t ^ (t >> 23) ^
y ^ (y >> 6) ^
z ^ (z >> 10);*/
}
return ~crc; // same as crc ^ 0xFFFFFFFF
}
/// compute CRC32 (byte algorithm) without lookup tables
uint32_t crc32_1byte_tableless2(const void* data, size_t length, uint32_t previousCrc32)
{
int32_t crc = ~previousCrc32; // note: signed integer, right shift distributes sign bit into lower bits
const uint8_t* current = (const uint8_t*) data;
while (length-- != 0)
{
crc = crc ^ *current++;
uint32_t c = (((crc << 31) >> 31) & ((Polynomial >> 7) ^ (Polynomial >> 1))) ^
(((crc << 30) >> 31) & ((Polynomial >> 6) ^ Polynomial)) ^
(((crc << 29) >> 31) & (Polynomial >> 5)) ^
(((crc << 28) >> 31) & (Polynomial >> 4)) ^
(((crc << 27) >> 31) & (Polynomial >> 3)) ^
(((crc << 26) >> 31) & (Polynomial >> 2)) ^
(((crc << 25) >> 31) & (Polynomial >> 1)) ^
(((crc << 24) >> 31) & Polynomial);
crc = ((uint32_t)crc >> 8) ^ c; // convert to unsigned integer before right shift
}
return ~crc; // same as crc ^ 0xFFFFFFFF
}
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_4
/// compute CRC32 (Slicing-by-4 algorithm)
uint32_t crc32_4bytes(const void* data, size_t length, uint32_t previousCrc32)
{
uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF
const uint32_t* current = (const uint32_t*) data;
// process four bytes at once (Slicing-by-4)
while (length >= 4)
{
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t one = *current++ ^ swap(crc);
crc = Crc32Lookup[0][ one & 0xFF] ^
Crc32Lookup[1][(one>> 8) & 0xFF] ^
Crc32Lookup[2][(one>>16) & 0xFF] ^
Crc32Lookup[3][(one>>24) & 0xFF];
#else
uint32_t one = *current++ ^ crc;
crc = Crc32Lookup[0][(one>>24) & 0xFF] ^
Crc32Lookup[1][(one>>16) & 0xFF] ^
Crc32Lookup[2][(one>> 8) & 0xFF] ^
Crc32Lookup[3][ one & 0xFF];
#endif
length -= 4;
}
const uint8_t* currentChar = (const uint8_t*) current;
// remaining 1 to 3 bytes (standard algorithm)
while (length-- != 0)
crc = (crc >> 8) ^ Crc32Lookup[0][(crc & 0xFF) ^ *currentChar++];
return ~crc; // same as crc ^ 0xFFFFFFFF
}
#endif
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_8
/// compute CRC32 (Slicing-by-8 algorithm)
uint32_t crc32_8bytes(const void* data, size_t length, uint32_t previousCrc32)
{
uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF
const uint32_t* current = (const uint32_t*) data;
// process eight bytes at once (Slicing-by-8)
while (length >= 8)
{
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t one = *current++ ^ swap(crc);
uint32_t two = *current++;
crc = Crc32Lookup[0][ two & 0xFF] ^
Crc32Lookup[1][(two>> 8) & 0xFF] ^
Crc32Lookup[2][(two>>16) & 0xFF] ^
Crc32Lookup[3][(two>>24) & 0xFF] ^
Crc32Lookup[4][ one & 0xFF] ^
Crc32Lookup[5][(one>> 8) & 0xFF] ^
Crc32Lookup[6][(one>>16) & 0xFF] ^
Crc32Lookup[7][(one>>24) & 0xFF];
#else
uint32_t one = *current++ ^ crc;
uint32_t two = *current++;
crc = Crc32Lookup[0][(two>>24) & 0xFF] ^
Crc32Lookup[1][(two>>16) & 0xFF] ^
Crc32Lookup[2][(two>> 8) & 0xFF] ^
Crc32Lookup[3][ two & 0xFF] ^
Crc32Lookup[4][(one>>24) & 0xFF] ^
Crc32Lookup[5][(one>>16) & 0xFF] ^
Crc32Lookup[6][(one>> 8) & 0xFF] ^
Crc32Lookup[7][ one & 0xFF];
#endif
length -= 8;
}
const uint8_t* currentChar = (const uint8_t*) current;
// remaining 1 to 7 bytes (standard algorithm)
while (length-- != 0)
crc = (crc >> 8) ^ Crc32Lookup[0][(crc & 0xFF) ^ *currentChar++];
return ~crc; // same as crc ^ 0xFFFFFFFF
}
/// compute CRC32 (Slicing-by-8 algorithm), unroll inner loop 4 times
uint32_t crc32_4x8bytes(const void* data, size_t length, uint32_t previousCrc32)
{
uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF
const uint32_t* current = (const uint32_t*) data;
// enabling optimization (at least -O2) automatically unrolls the inner for-loop
const size_t Unroll = 4;
const size_t BytesAtOnce = 8 * Unroll;
// process 4x eight bytes at once (Slicing-by-8)
while (length >= BytesAtOnce)
{
for (size_t unrolling = 0; unrolling < Unroll; unrolling++)
{
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t one = *current++ ^ swap(crc);
uint32_t two = *current++;
crc = Crc32Lookup[0][ two & 0xFF] ^
Crc32Lookup[1][(two>> 8) & 0xFF] ^
Crc32Lookup[2][(two>>16) & 0xFF] ^
Crc32Lookup[3][(two>>24) & 0xFF] ^
Crc32Lookup[4][ one & 0xFF] ^
Crc32Lookup[5][(one>> 8) & 0xFF] ^
Crc32Lookup[6][(one>>16) & 0xFF] ^
Crc32Lookup[7][(one>>24) & 0xFF];
#else
uint32_t one = *current++ ^ crc;
uint32_t two = *current++;
crc = Crc32Lookup[0][(two>>24) & 0xFF] ^
Crc32Lookup[1][(two>>16) & 0xFF] ^
Crc32Lookup[2][(two>> 8) & 0xFF] ^
Crc32Lookup[3][ two & 0xFF] ^
Crc32Lookup[4][(one>>24) & 0xFF] ^
Crc32Lookup[5][(one>>16) & 0xFF] ^
Crc32Lookup[6][(one>> 8) & 0xFF] ^
Crc32Lookup[7][ one & 0xFF];
#endif
}
length -= BytesAtOnce;
}
const uint8_t* currentChar = (const uint8_t*) current;
// remaining 1 to 31 bytes (standard algorithm)
while (length-- != 0)
crc = (crc >> 8) ^ Crc32Lookup[0][(crc & 0xFF) ^ *currentChar++];
return ~crc; // same as crc ^ 0xFFFFFFFF
}
#endif // CRC32_USE_LOOKUP_TABLE_SLICING_BY_8
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
/// compute CRC32 (Slicing-by-16 algorithm)
uint32_t crc32_16bytes(const void* data, size_t length, uint32_t previousCrc32)
{
uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF
const uint32_t* current = (const uint32_t*) data;
// enabling optimization (at least -O2) automatically unrolls the inner for-loop
const size_t Unroll = 4;
const size_t BytesAtOnce = 16 * Unroll;
while (length >= BytesAtOnce)
{
for (size_t unrolling = 0; unrolling < Unroll; unrolling++)
{
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t one = *current++ ^ swap(crc);
uint32_t two = *current++;
uint32_t three = *current++;
uint32_t four = *current++;
crc = Crc32Lookup[ 0][ four & 0xFF] ^
Crc32Lookup[ 1][(four >> 8) & 0xFF] ^
Crc32Lookup[ 2][(four >> 16) & 0xFF] ^
Crc32Lookup[ 3][(four >> 24) & 0xFF] ^
Crc32Lookup[ 4][ three & 0xFF] ^
Crc32Lookup[ 5][(three >> 8) & 0xFF] ^
Crc32Lookup[ 6][(three >> 16) & 0xFF] ^
Crc32Lookup[ 7][(three >> 24) & 0xFF] ^
Crc32Lookup[ 8][ two & 0xFF] ^
Crc32Lookup[ 9][(two >> 8) & 0xFF] ^
Crc32Lookup[10][(two >> 16) & 0xFF] ^
Crc32Lookup[11][(two >> 24) & 0xFF] ^
Crc32Lookup[12][ one & 0xFF] ^
Crc32Lookup[13][(one >> 8) & 0xFF] ^
Crc32Lookup[14][(one >> 16) & 0xFF] ^
Crc32Lookup[15][(one >> 24) & 0xFF];
#else
uint32_t one = *current++ ^ crc;
uint32_t two = *current++;
uint32_t three = *current++;
uint32_t four = *current++;
crc = Crc32Lookup[ 0][(four >> 24) & 0xFF] ^
Crc32Lookup[ 1][(four >> 16) & 0xFF] ^
Crc32Lookup[ 2][(four >> 8) & 0xFF] ^
Crc32Lookup[ 3][ four & 0xFF] ^
Crc32Lookup[ 4][(three >> 24) & 0xFF] ^
Crc32Lookup[ 5][(three >> 16) & 0xFF] ^
Crc32Lookup[ 6][(three >> 8) & 0xFF] ^
Crc32Lookup[ 7][ three & 0xFF] ^
Crc32Lookup[ 8][(two >> 24) & 0xFF] ^
Crc32Lookup[ 9][(two >> 16) & 0xFF] ^
Crc32Lookup[10][(two >> 8) & 0xFF] ^
Crc32Lookup[11][ two & 0xFF] ^
Crc32Lookup[12][(one >> 24) & 0xFF] ^
Crc32Lookup[13][(one >> 16) & 0xFF] ^
Crc32Lookup[14][(one >> 8) & 0xFF] ^
Crc32Lookup[15][ one & 0xFF];
#endif
}
length -= BytesAtOnce;
}
const uint8_t* currentChar = (const uint8_t*) current;
// remaining 1 to 63 bytes (standard algorithm)
while (length-- != 0)
crc = (crc >> 8) ^ Crc32Lookup[0][(crc & 0xFF) ^ *currentChar++];
return ~crc; // same as crc ^ 0xFFFFFFFF
}
/// compute CRC32 (Slicing-by-16 algorithm, prefetch upcoming data blocks)
uint32_t crc32_16bytes_prefetch(const void* data, size_t length, uint32_t previousCrc32, size_t prefetchAhead)
{
// CRC code is identical to crc32_16bytes (including unrolling), only added prefetching
// 256 bytes look-ahead seems to be the sweet spot on Core i7 CPUs
uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF
const uint32_t* current = (const uint32_t*) data;
// enabling optimization (at least -O2) automatically unrolls the for-loop
const size_t Unroll = 4;
const size_t BytesAtOnce = 16 * Unroll;
while (length >= BytesAtOnce + prefetchAhead)
{
PREFETCH(((const char*) current) + prefetchAhead);
for (size_t unrolling = 0; unrolling < Unroll; unrolling++)
{
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t one = *current++ ^ swap(crc);
uint32_t two = *current++;
uint32_t three = *current++;
uint32_t four = *current++;
crc = Crc32Lookup[ 0][ four & 0xFF] ^
Crc32Lookup[ 1][(four >> 8) & 0xFF] ^
Crc32Lookup[ 2][(four >> 16) & 0xFF] ^
Crc32Lookup[ 3][(four >> 24) & 0xFF] ^
Crc32Lookup[ 4][ three & 0xFF] ^
Crc32Lookup[ 5][(three >> 8) & 0xFF] ^
Crc32Lookup[ 6][(three >> 16) & 0xFF] ^
Crc32Lookup[ 7][(three >> 24) & 0xFF] ^
Crc32Lookup[ 8][ two & 0xFF] ^
Crc32Lookup[ 9][(two >> 8) & 0xFF] ^
Crc32Lookup[10][(two >> 16) & 0xFF] ^
Crc32Lookup[11][(two >> 24) & 0xFF] ^
Crc32Lookup[12][ one & 0xFF] ^
Crc32Lookup[13][(one >> 8) & 0xFF] ^
Crc32Lookup[14][(one >> 16) & 0xFF] ^
Crc32Lookup[15][(one >> 24) & 0xFF];
#else
uint32_t one = *current++ ^ crc;
uint32_t two = *current++;
uint32_t three = *current++;
uint32_t four = *current++;
crc = Crc32Lookup[ 0][(four >> 24) & 0xFF] ^
Crc32Lookup[ 1][(four >> 16) & 0xFF] ^
Crc32Lookup[ 2][(four >> 8) & 0xFF] ^
Crc32Lookup[ 3][ four & 0xFF] ^
Crc32Lookup[ 4][(three >> 24) & 0xFF] ^
Crc32Lookup[ 5][(three >> 16) & 0xFF] ^
Crc32Lookup[ 6][(three >> 8) & 0xFF] ^
Crc32Lookup[ 7][ three & 0xFF] ^
Crc32Lookup[ 8][(two >> 24) & 0xFF] ^
Crc32Lookup[ 9][(two >> 16) & 0xFF] ^
Crc32Lookup[10][(two >> 8) & 0xFF] ^
Crc32Lookup[11][ two & 0xFF] ^
Crc32Lookup[12][(one >> 24) & 0xFF] ^
Crc32Lookup[13][(one >> 16) & 0xFF] ^
Crc32Lookup[14][(one >> 8) & 0xFF] ^
Crc32Lookup[15][ one & 0xFF];
#endif
}
length -= BytesAtOnce;
}
const uint8_t* currentChar = (const uint8_t*) current;
// remaining 1 to 63 bytes (standard algorithm)
while (length-- != 0)
crc = (crc >> 8) ^ Crc32Lookup[0][(crc & 0xFF) ^ *currentChar++];
return ~crc; // same as crc ^ 0xFFFFFFFF
}
#endif
/// compute CRC32 using the fastest algorithm for large datasets on modern CPUs
uint32_t crc32_fast(const void* data, size_t length, uint32_t previousCrc32)
{
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
return crc32_16bytes (data, length, previousCrc32);
#elif defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_8)
return crc32_8bytes (data, length, previousCrc32);
#elif defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_4)
return crc32_4bytes (data, length, previousCrc32);
#elif defined(CRC32_USE_LOOKUP_TABLE_BYTE)
return crc32_1byte (data, length, previousCrc32);
#else
return crc32_halfbyte(data, length, previousCrc32);
#endif
}
/// merge two CRC32 such that result = crc32(dataB, lengthB, crc32(dataA, lengthA))
uint32_t crc32_combine(uint32_t crcA, uint32_t crcB, size_t lengthB)
{
// based on Mark Adler's crc_combine from
// https://github.com/madler/pigz/blob/master/pigz.c
// main idea:
// - if you have two equally-sized blocks A and B,
// then you can create a block C = A ^ B
// which has the property crc(C) = crc(A) ^ crc(B)
// - if you append length(B) zeros to A and call it A' (think of it as AAAA000)
// and prepend length(A) zeros to B and call it B' (think of it as 0000BBB)
  //   then there exists a C' = A' ^ B'
  // - remember: if you XOR something with zero, it remains unchanged: X ^ 0 = X
// - that means C' = A concat B so that crc(A concat B) = crc(C') = crc(A') ^ crc(B')
// - the trick is to compute crc(A') based on crc(A)
// and crc(B') based on crc(B)
// - since B' starts with many zeros, the crc of those initial zeros is still zero
// - that means crc(B') = crc(B)
// - unfortunately the trailing zeros of A' change the crc, so usually crc(A') != crc(A)
// - the following code is a fast algorithm to compute crc(A')
// - starting with crc(A) and appending length(B) zeros, needing just log2(length(B)) iterations
// - the details are explained by the original author at
// https://stackoverflow.com/questions/23122312/crc-calculation-of-a-mostly-static-data-stream/23126768
//
// notes:
  // - I squeezed everything into one function to keep the global namespace clean (the original code had two helper functions)
  // - most original comments are still in place; I added comments where these helper functions were made inline code
  // - performance-wise there isn't any difference from the original zlib/pigz code
// degenerated case
if (lengthB == 0)
return crcA;
/// CRC32 => 32 bits
const uint32_t CrcBits = 32;
uint32_t odd [CrcBits]; // odd-power-of-two zeros operator
uint32_t even[CrcBits]; // even-power-of-two zeros operator
// put operator for one zero bit in odd
odd[0] = Polynomial; // CRC-32 polynomial
for (uint32_t i = 1; i < CrcBits; i++)
odd[i] = 1 << (i - 1);
// put operator for two zero bits in even
// same as gf2_matrix_square(even, odd);
for (uint32_t i = 0; i < CrcBits; i++)
{
uint32_t vec = odd[i];
even[i] = 0;
for (int j = 0; vec != 0; j++, vec >>= 1)
if (vec & 1)
even[i] ^= odd[j];
}
// put operator for four zero bits in odd
// same as gf2_matrix_square(odd, even);
for (uint32_t i = 0; i < CrcBits; i++)
{
uint32_t vec = even[i];
odd[i] = 0;
for (int j = 0; vec != 0; j++, vec >>= 1)
if (vec & 1)
odd[i] ^= even[j];
}
// the following loop becomes much shorter if I keep swapping even and odd
uint32_t* a = even;
uint32_t* b = odd;
// apply secondLength zeros to firstCrc32
for (; lengthB > 0; lengthB >>= 1)
{
// same as gf2_matrix_square(a, b);
for (uint32_t i = 0; i < CrcBits; i++)
{
uint32_t vec = b[i];
a[i] = 0;
for (int j = 0; vec != 0; j++, vec >>= 1)
if (vec & 1)
a[i] ^= b[j];
}
// apply zeros operator for this bit
if (lengthB & 1)
{
// same as firstCrc32 = gf2_matrix_times(a, firstCrc32);
uint32_t sum = 0;
for (int i = 0; crcA != 0; i++, crcA >>= 1)
if (crcA & 1)
sum ^= a[i];
crcA = sum;
}
// switch even and odd
uint32_t* t = a; a = b; b = t;
}
// return combined crc
return crcA ^ crcB;
}
// //////////////////////////////////////////////////////////
// constants
#ifndef NO_LUT
/// look-up table, already declared above
const uint32_t Crc32Lookup[MaxSlice][256] =
{
//// same algorithm as crc32_bitwise
//for (int i = 0; i <= 0xFF; i++)
//{
// uint32_t crc = i;
// for (int j = 0; j < 8; j++)
// crc = (crc >> 1) ^ ((crc & 1) * Polynomial);
// Crc32Lookup[0][i] = crc;
//}
//// ... and the following slicing-by-8 algorithm (from Intel):
//// http://www.intel.com/technology/comms/perfnet/download/CRC_generators.pdf
//// http://sourceforge.net/projects/slicing-by-8/
//for (int slice = 1; slice < MaxSlice; slice++)
// Crc32Lookup[slice][i] = (Crc32Lookup[slice - 1][i] >> 8) ^ Crc32Lookup[0][Crc32Lookup[slice - 1][i] & 0xFF];
{
  // note: the first number of every second row corresponds to the half-byte look-up table!
0x00000000,0x77073096,0xEE0E612C,0x990951BA,0x076DC419,0x706AF48F,0xE963A535,0x9E6495A3,
0x0EDB8832,0x79DCB8A4,0xE0D5E91E,0x97D2D988,0x09B64C2B,0x7EB17CBD,0xE7B82D07,0x90BF1D91,
0x1DB71064,0x6AB020F2,0xF3B97148,0x84BE41DE,0x1ADAD47D,0x6DDDE4EB,0xF4D4B551,0x83D385C7,
0x136C9856,0x646BA8C0,0xFD62F97A,0x8A65C9EC,0x14015C4F,0x63066CD9,0xFA0F3D63,0x8D080DF5,
0x3B6E20C8,0x4C69105E,0xD56041E4,0xA2677172,0x3C03E4D1,0x4B04D447,0xD20D85FD,0xA50AB56B,
0x35B5A8FA,0x42B2986C,0xDBBBC9D6,0xACBCF940,0x32D86CE3,0x45DF5C75,0xDCD60DCF,0xABD13D59,
0x26D930AC,0x51DE003A,0xC8D75180,0xBFD06116,0x21B4F4B5,0x56B3C423,0xCFBA9599,0xB8BDA50F,
0x2802B89E,0x5F058808,0xC60CD9B2,0xB10BE924,0x2F6F7C87,0x58684C11,0xC1611DAB,0xB6662D3D,
0x76DC4190,0x01DB7106,0x98D220BC,0xEFD5102A,0x71B18589,0x06B6B51F,0x9FBFE4A5,0xE8B8D433,
0x7807C9A2,0x0F00F934,0x9609A88E,0xE10E9818,0x7F6A0DBB,0x086D3D2D,0x91646C97,0xE6635C01,
0x6B6B51F4,0x1C6C6162,0x856530D8,0xF262004E,0x6C0695ED,0x1B01A57B,0x8208F4C1,0xF50FC457,
0x65B0D9C6,0x12B7E950,0x8BBEB8EA,0xFCB9887C,0x62DD1DDF,0x15DA2D49,0x8CD37CF3,0xFBD44C65,
0x4DB26158,0x3AB551CE,0xA3BC0074,0xD4BB30E2,0x4ADFA541,0x3DD895D7,0xA4D1C46D,0xD3D6F4FB,
0x4369E96A,0x346ED9FC,0xAD678846,0xDA60B8D0,0x44042D73,0x33031DE5,0xAA0A4C5F,0xDD0D7CC9,
0x5005713C,0x270241AA,0xBE0B1010,0xC90C2086,0x5768B525,0x206F85B3,0xB966D409,0xCE61E49F,
0x5EDEF90E,0x29D9C998,0xB0D09822,0xC7D7A8B4,0x59B33D17,0x2EB40D81,0xB7BD5C3B,0xC0BA6CAD,
0xEDB88320,0x9ABFB3B6,0x03B6E20C,0x74B1D29A,0xEAD54739,0x9DD277AF,0x04DB2615,0x73DC1683,
0xE3630B12,0x94643B84,0x0D6D6A3E,0x7A6A5AA8,0xE40ECF0B,0x9309FF9D,0x0A00AE27,0x7D079EB1,
0xF00F9344,0x8708A3D2,0x1E01F268,0x6906C2FE,0xF762575D,0x806567CB,0x196C3671,0x6E6B06E7,
0xFED41B76,0x89D32BE0,0x10DA7A5A,0x67DD4ACC,0xF9B9DF6F,0x8EBEEFF9,0x17B7BE43,0x60B08ED5,
0xD6D6A3E8,0xA1D1937E,0x38D8C2C4,0x4FDFF252,0xD1BB67F1,0xA6BC5767,0x3FB506DD,0x48B2364B,
0xD80D2BDA,0xAF0A1B4C,0x36034AF6,0x41047A60,0xDF60EFC3,0xA867DF55,0x316E8EEF,0x4669BE79,
0xCB61B38C,0xBC66831A,0x256FD2A0,0x5268E236,0xCC0C7795,0xBB0B4703,0x220216B9,0x5505262F,
0xC5BA3BBE,0xB2BD0B28,0x2BB45A92,0x5CB36A04,0xC2D7FFA7,0xB5D0CF31,0x2CD99E8B,0x5BDEAE1D,
0x9B64C2B0,0xEC63F226,0x756AA39C,0x026D930A,0x9C0906A9,0xEB0E363F,0x72076785,0x05005713,
0x95BF4A82,0xE2B87A14,0x7BB12BAE,0x0CB61B38,0x92D28E9B,0xE5D5BE0D,0x7CDCEFB7,0x0BDBDF21,
0x86D3D2D4,0xF1D4E242,0x68DDB3F8,0x1FDA836E,0x81BE16CD,0xF6B9265B,0x6FB077E1,0x18B74777,
0x88085AE6,0xFF0F6A70,0x66063BCA,0x11010B5C,0x8F659EFF,0xF862AE69,0x616BFFD3,0x166CCF45,
0xA00AE278,0xD70DD2EE,0x4E048354,0x3903B3C2,0xA7672661,0xD06016F7,0x4969474D,0x3E6E77DB,
0xAED16A4A,0xD9D65ADC,0x40DF0B66,0x37D83BF0,0xA9BCAE53,0xDEBB9EC5,0x47B2CF7F,0x30B5FFE9,
0xBDBDF21C,0xCABAC28A,0x53B39330,0x24B4A3A6,0xBAD03605,0xCDD70693,0x54DE5729,0x23D967BF,
0xB3667A2E,0xC4614AB8,0x5D681B02,0x2A6F2B94,0xB40BBE37,0xC30C8EA1,0x5A05DF1B,0x2D02EF8D,
}
#if defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_4) || defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_8) || defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_16)
// beyond this point only relevant for Slicing-by-4, Slicing-by-8 and Slicing-by-16
,{
0x00000000,0x191B3141,0x32366282,0x2B2D53C3,0x646CC504,0x7D77F445,0x565AA786,0x4F4196C7,
0xC8D98A08,0xD1C2BB49,0xFAEFE88A,0xE3F4D9CB,0xACB54F0C,0xB5AE7E4D,0x9E832D8E,0x87981CCF,
0x4AC21251,0x53D92310,0x78F470D3,0x61EF4192,0x2EAED755,0x37B5E614,0x1C98B5D7,0x05838496,
0x821B9859,0x9B00A918,0xB02DFADB,0xA936CB9A,0xE6775D5D,0xFF6C6C1C,0xD4413FDF,0xCD5A0E9E,
0x958424A2,0x8C9F15E3,0xA7B24620,0xBEA97761,0xF1E8E1A6,0xE8F3D0E7,0xC3DE8324,0xDAC5B265,
0x5D5DAEAA,0x44469FEB,0x6F6BCC28,0x7670FD69,0x39316BAE,0x202A5AEF,0x0B07092C,0x121C386D,
0xDF4636F3,0xC65D07B2,0xED705471,0xF46B6530,0xBB2AF3F7,0xA231C2B6,0x891C9175,0x9007A034,
0x179FBCFB,0x0E848DBA,0x25A9DE79,0x3CB2EF38,0x73F379FF,0x6AE848BE,0x41C51B7D,0x58DE2A3C,
0xF0794F05,0xE9627E44,0xC24F2D87,0xDB541CC6,0x94158A01,0x8D0EBB40,0xA623E883,0xBF38D9C2,
0x38A0C50D,0x21BBF44C,0x0A96A78F,0x138D96CE,0x5CCC0009,0x45D73148,0x6EFA628B,0x77E153CA,
0xBABB5D54,0xA3A06C15,0x888D3FD6,0x91960E97,0xDED79850,0xC7CCA911,0xECE1FAD2,0xF5FACB93,
0x7262D75C,0x6B79E61D,0x4054B5DE,0x594F849F,0x160E1258,0x0F152319,0x243870DA,0x3D23419B,
0x65FD6BA7,0x7CE65AE6,0x57CB0925,0x4ED03864,0x0191AEA3,0x188A9FE2,0x33A7CC21,0x2ABCFD60,
0xAD24E1AF,0xB43FD0EE,0x9F12832D,0x8609B26C,0xC94824AB,0xD05315EA,0xFB7E4629,0xE2657768,
0x2F3F79F6,0x362448B7,0x1D091B74,0x04122A35,0x4B53BCF2,0x52488DB3,0x7965DE70,0x607EEF31,
0xE7E6F3FE,0xFEFDC2BF,0xD5D0917C,0xCCCBA03D,0x838A36FA,0x9A9107BB,0xB1BC5478,0xA8A76539,
0x3B83984B,0x2298A90A,0x09B5FAC9,0x10AECB88,0x5FEF5D4F,0x46F46C0E,0x6DD93FCD,0x74C20E8C,
0xF35A1243,0xEA412302,0xC16C70C1,0xD8774180,0x9736D747,0x8E2DE606,0xA500B5C5,0xBC1B8484,
0x71418A1A,0x685ABB5B,0x4377E898,0x5A6CD9D9,0x152D4F1E,0x0C367E5F,0x271B2D9C,0x3E001CDD,
0xB9980012,0xA0833153,0x8BAE6290,0x92B553D1,0xDDF4C516,0xC4EFF457,0xEFC2A794,0xF6D996D5,
0xAE07BCE9,0xB71C8DA8,0x9C31DE6B,0x852AEF2A,0xCA6B79ED,0xD37048AC,0xF85D1B6F,0xE1462A2E,
0x66DE36E1,0x7FC507A0,0x54E85463,0x4DF36522,0x02B2F3E5,0x1BA9C2A4,0x30849167,0x299FA026,
0xE4C5AEB8,0xFDDE9FF9,0xD6F3CC3A,0xCFE8FD7B,0x80A96BBC,0x99B25AFD,0xB29F093E,0xAB84387F,
0x2C1C24B0,0x350715F1,0x1E2A4632,0x07317773,0x4870E1B4,0x516BD0F5,0x7A468336,0x635DB277,
0xCBFAD74E,0xD2E1E60F,0xF9CCB5CC,0xE0D7848D,0xAF96124A,0xB68D230B,0x9DA070C8,0x84BB4189,
0x03235D46,0x1A386C07,0x31153FC4,0x280E0E85,0x674F9842,0x7E54A903,0x5579FAC0,0x4C62CB81,
0x8138C51F,0x9823F45E,0xB30EA79D,0xAA1596DC,0xE554001B,0xFC4F315A,0xD7626299,0xCE7953D8,
0x49E14F17,0x50FA7E56,0x7BD72D95,0x62CC1CD4,0x2D8D8A13,0x3496BB52,0x1FBBE891,0x06A0D9D0,
0x5E7EF3EC,0x4765C2AD,0x6C48916E,0x7553A02F,0x3A1236E8,0x230907A9,0x0824546A,0x113F652B,
0x96A779E4,0x8FBC48A5,0xA4911B66,0xBD8A2A27,0xF2CBBCE0,0xEBD08DA1,0xC0FDDE62,0xD9E6EF23,
0x14BCE1BD,0x0DA7D0FC,0x268A833F,0x3F91B27E,0x70D024B9,0x69CB15F8,0x42E6463B,0x5BFD777A,
0xDC656BB5,0xC57E5AF4,0xEE530937,0xF7483876,0xB809AEB1,0xA1129FF0,0x8A3FCC33,0x9324FD72,
},
{
0x00000000,0x01C26A37,0x0384D46E,0x0246BE59,0x0709A8DC,0x06CBC2EB,0x048D7CB2,0x054F1685,
0x0E1351B8,0x0FD13B8F,0x0D9785D6,0x0C55EFE1,0x091AF964,0x08D89353,0x0A9E2D0A,0x0B5C473D,
0x1C26A370,0x1DE4C947,0x1FA2771E,0x1E601D29,0x1B2F0BAC,0x1AED619B,0x18ABDFC2,0x1969B5F5,
0x1235F2C8,0x13F798FF,0x11B126A6,0x10734C91,0x153C5A14,0x14FE3023,0x16B88E7A,0x177AE44D,
0x384D46E0,0x398F2CD7,0x3BC9928E,0x3A0BF8B9,0x3F44EE3C,0x3E86840B,0x3CC03A52,0x3D025065,
0x365E1758,0x379C7D6F,0x35DAC336,0x3418A901,0x3157BF84,0x3095D5B3,0x32D36BEA,0x331101DD,
0x246BE590,0x25A98FA7,0x27EF31FE,0x262D5BC9,0x23624D4C,0x22A0277B,0x20E69922,0x2124F315,
0x2A78B428,0x2BBADE1F,0x29FC6046,0x283E0A71,0x2D711CF4,0x2CB376C3,0x2EF5C89A,0x2F37A2AD,
0x709A8DC0,0x7158E7F7,0x731E59AE,0x72DC3399,0x7793251C,0x76514F2B,0x7417F172,0x75D59B45,
0x7E89DC78,0x7F4BB64F,0x7D0D0816,0x7CCF6221,0x798074A4,0x78421E93,0x7A04A0CA,0x7BC6CAFD,
0x6CBC2EB0,0x6D7E4487,0x6F38FADE,0x6EFA90E9,0x6BB5866C,0x6A77EC5B,0x68315202,0x69F33835,
0x62AF7F08,0x636D153F,0x612BAB66,0x60E9C151,0x65A6D7D4,0x6464BDE3,0x662203BA,0x67E0698D,
0x48D7CB20,0x4915A117,0x4B531F4E,0x4A917579,0x4FDE63FC,0x4E1C09CB,0x4C5AB792,0x4D98DDA5,
0x46C49A98,0x4706F0AF,0x45404EF6,0x448224C1,0x41CD3244,0x400F5873,0x4249E62A,0x438B8C1D,
0x54F16850,0x55330267,0x5775BC3E,0x56B7D609,0x53F8C08C,0x523AAABB,0x507C14E2,0x51BE7ED5,
0x5AE239E8,0x5B2053DF,0x5966ED86,0x58A487B1,0x5DEB9134,0x5C29FB03,0x5E6F455A,0x5FAD2F6D,
0xE1351B80,0xE0F771B7,0xE2B1CFEE,0xE373A5D9,0xE63CB35C,0xE7FED96B,0xE5B86732,0xE47A0D05,
0xEF264A38,0xEEE4200F,0xECA29E56,0xED60F461,0xE82FE2E4,0xE9ED88D3,0xEBAB368A,0xEA695CBD,
0xFD13B8F0,0xFCD1D2C7,0xFE976C9E,0xFF5506A9,0xFA1A102C,0xFBD87A1B,0xF99EC442,0xF85CAE75,
0xF300E948,0xF2C2837F,0xF0843D26,0xF1465711,0xF4094194,0xF5CB2BA3,0xF78D95FA,0xF64FFFCD,
0xD9785D60,0xD8BA3757,0xDAFC890E,0xDB3EE339,0xDE71F5BC,0xDFB39F8B,0xDDF521D2,0xDC374BE5,
0xD76B0CD8,0xD6A966EF,0xD4EFD8B6,0xD52DB281,0xD062A404,0xD1A0CE33,0xD3E6706A,0xD2241A5D,
0xC55EFE10,0xC49C9427,0xC6DA2A7E,0xC7184049,0xC25756CC,0xC3953CFB,0xC1D382A2,0xC011E895,
0xCB4DAFA8,0xCA8FC59F,0xC8C97BC6,0xC90B11F1,0xCC440774,0xCD866D43,0xCFC0D31A,0xCE02B92D,
0x91AF9640,0x906DFC77,0x922B422E,0x93E92819,0x96A63E9C,0x976454AB,0x9522EAF2,0x94E080C5,
0x9FBCC7F8,0x9E7EADCF,0x9C381396,0x9DFA79A1,0x98B56F24,0x99770513,0x9B31BB4A,0x9AF3D17D,
0x8D893530,0x8C4B5F07,0x8E0DE15E,0x8FCF8B69,0x8A809DEC,0x8B42F7DB,0x89044982,0x88C623B5,
0x839A6488,0x82580EBF,0x801EB0E6,0x81DCDAD1,0x8493CC54,0x8551A663,0x8717183A,0x86D5720D,
0xA9E2D0A0,0xA820BA97,0xAA6604CE,0xABA46EF9,0xAEEB787C,0xAF29124B,0xAD6FAC12,0xACADC625,
0xA7F18118,0xA633EB2F,0xA4755576,0xA5B73F41,0xA0F829C4,0xA13A43F3,0xA37CFDAA,0xA2BE979D,
0xB5C473D0,0xB40619E7,0xB640A7BE,0xB782CD89,0xB2CDDB0C,0xB30FB13B,0xB1490F62,0xB08B6555,
0xBBD72268,0xBA15485F,0xB853F606,0xB9919C31,0xBCDE8AB4,0xBD1CE083,0xBF5A5EDA,0xBE9834ED,
},
{
0x00000000,0xB8BC6765,0xAA09C88B,0x12B5AFEE,0x8F629757,0x37DEF032,0x256B5FDC,0x9DD738B9,
0xC5B428EF,0x7D084F8A,0x6FBDE064,0xD7018701,0x4AD6BFB8,0xF26AD8DD,0xE0DF7733,0x58631056,
0x5019579F,0xE8A530FA,0xFA109F14,0x42ACF871,0xDF7BC0C8,0x67C7A7AD,0x75720843,0xCDCE6F26,
0x95AD7F70,0x2D111815,0x3FA4B7FB,0x8718D09E,0x1ACFE827,0xA2738F42,0xB0C620AC,0x087A47C9,
0xA032AF3E,0x188EC85B,0x0A3B67B5,0xB28700D0,0x2F503869,0x97EC5F0C,0x8559F0E2,0x3DE59787,
0x658687D1,0xDD3AE0B4,0xCF8F4F5A,0x7733283F,0xEAE41086,0x525877E3,0x40EDD80D,0xF851BF68,
0xF02BF8A1,0x48979FC4,0x5A22302A,0xE29E574F,0x7F496FF6,0xC7F50893,0xD540A77D,0x6DFCC018,
0x359FD04E,0x8D23B72B,0x9F9618C5,0x272A7FA0,0xBAFD4719,0x0241207C,0x10F48F92,0xA848E8F7,
0x9B14583D,0x23A83F58,0x311D90B6,0x89A1F7D3,0x1476CF6A,0xACCAA80F,0xBE7F07E1,0x06C36084,
0x5EA070D2,0xE61C17B7,0xF4A9B859,0x4C15DF3C,0xD1C2E785,0x697E80E0,0x7BCB2F0E,0xC377486B,
0xCB0D0FA2,0x73B168C7,0x6104C729,0xD9B8A04C,0x446F98F5,0xFCD3FF90,0xEE66507E,0x56DA371B,
0x0EB9274D,0xB6054028,0xA4B0EFC6,0x1C0C88A3,0x81DBB01A,0x3967D77F,0x2BD27891,0x936E1FF4,
0x3B26F703,0x839A9066,0x912F3F88,0x299358ED,0xB4446054,0x0CF80731,0x1E4DA8DF,0xA6F1CFBA,
0xFE92DFEC,0x462EB889,0x549B1767,0xEC277002,0x71F048BB,0xC94C2FDE,0xDBF98030,0x6345E755,
0x6B3FA09C,0xD383C7F9,0xC1366817,0x798A0F72,0xE45D37CB,0x5CE150AE,0x4E54FF40,0xF6E89825,
0xAE8B8873,0x1637EF16,0x048240F8,0xBC3E279D,0x21E91F24,0x99557841,0x8BE0D7AF,0x335CB0CA,
0xED59B63B,0x55E5D15E,0x47507EB0,0xFFEC19D5,0x623B216C,0xDA874609,0xC832E9E7,0x708E8E82,
0x28ED9ED4,0x9051F9B1,0x82E4565F,0x3A58313A,0xA78F0983,0x1F336EE6,0x0D86C108,0xB53AA66D,
0xBD40E1A4,0x05FC86C1,0x1749292F,0xAFF54E4A,0x322276F3,0x8A9E1196,0x982BBE78,0x2097D91D,
0x78F4C94B,0xC048AE2E,0xD2FD01C0,0x6A4166A5,0xF7965E1C,0x4F2A3979,0x5D9F9697,0xE523F1F2,
0x4D6B1905,0xF5D77E60,0xE762D18E,0x5FDEB6EB,0xC2098E52,0x7AB5E937,0x680046D9,0xD0BC21BC,
0x88DF31EA,0x3063568F,0x22D6F961,0x9A6A9E04,0x07BDA6BD,0xBF01C1D8,0xADB46E36,0x15080953,
0x1D724E9A,0xA5CE29FF,0xB77B8611,0x0FC7E174,0x9210D9CD,0x2AACBEA8,0x38191146,0x80A57623,
0xD8C66675,0x607A0110,0x72CFAEFE,0xCA73C99B,0x57A4F122,0xEF189647,0xFDAD39A9,0x45115ECC,
0x764DEE06,0xCEF18963,0xDC44268D,0x64F841E8,0xF92F7951,0x41931E34,0x5326B1DA,0xEB9AD6BF,
0xB3F9C6E9,0x0B45A18C,0x19F00E62,0xA14C6907,0x3C9B51BE,0x842736DB,0x96929935,0x2E2EFE50,
0x2654B999,0x9EE8DEFC,0x8C5D7112,0x34E11677,0xA9362ECE,0x118A49AB,0x033FE645,0xBB838120,
0xE3E09176,0x5B5CF613,0x49E959FD,0xF1553E98,0x6C820621,0xD43E6144,0xC68BCEAA,0x7E37A9CF,
0xD67F4138,0x6EC3265D,0x7C7689B3,0xC4CAEED6,0x591DD66F,0xE1A1B10A,0xF3141EE4,0x4BA87981,
0x13CB69D7,0xAB770EB2,0xB9C2A15C,0x017EC639,0x9CA9FE80,0x241599E5,0x36A0360B,0x8E1C516E,
0x866616A7,0x3EDA71C2,0x2C6FDE2C,0x94D3B949,0x090481F0,0xB1B8E695,0xA30D497B,0x1BB12E1E,
0x43D23E48,0xFB6E592D,0xE9DBF6C3,0x516791A6,0xCCB0A91F,0x740CCE7A,0x66B96194,0xDE0506F1,
}
#endif // defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_4) || defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_8) || defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_16)
#if defined (CRC32_USE_LOOKUP_TABLE_SLICING_BY_8) || defined(CRC32_USE_LOOKUP_TABLE_SLICING_BY_16)
// beyond this point only relevant for Slicing-by-8 and Slicing-by-16
,{
0x00000000,0x3D6029B0,0x7AC05360,0x47A07AD0,0xF580A6C0,0xC8E08F70,0x8F40F5A0,0xB220DC10,
0x30704BC1,0x0D106271,0x4AB018A1,0x77D03111,0xC5F0ED01,0xF890C4B1,0xBF30BE61,0x825097D1,
0x60E09782,0x5D80BE32,0x1A20C4E2,0x2740ED52,0x95603142,0xA80018F2,0xEFA06222,0xD2C04B92,
0x5090DC43,0x6DF0F5F3,0x2A508F23,0x1730A693,0xA5107A83,0x98705333,0xDFD029E3,0xE2B00053,
0xC1C12F04,0xFCA106B4,0xBB017C64,0x866155D4,0x344189C4,0x0921A074,0x4E81DAA4,0x73E1F314,
0xF1B164C5,0xCCD14D75,0x8B7137A5,0xB6111E15,0x0431C205,0x3951EBB5,0x7EF19165,0x4391B8D5,
0xA121B886,0x9C419136,0xDBE1EBE6,0xE681C256,0x54A11E46,0x69C137F6,0x2E614D26,0x13016496,
0x9151F347,0xAC31DAF7,0xEB91A027,0xD6F18997,0x64D15587,0x59B17C37,0x1E1106E7,0x23712F57,
0x58F35849,0x659371F9,0x22330B29,0x1F532299,0xAD73FE89,0x9013D739,0xD7B3ADE9,0xEAD38459,
0x68831388,0x55E33A38,0x124340E8,0x2F236958,0x9D03B548,0xA0639CF8,0xE7C3E628,0xDAA3CF98,
0x3813CFCB,0x0573E67B,0x42D39CAB,0x7FB3B51B,0xCD93690B,0xF0F340BB,0xB7533A6B,0x8A3313DB,
0x0863840A,0x3503ADBA,0x72A3D76A,0x4FC3FEDA,0xFDE322CA,0xC0830B7A,0x872371AA,0xBA43581A,
0x9932774D,0xA4525EFD,0xE3F2242D,0xDE920D9D,0x6CB2D18D,0x51D2F83D,0x167282ED,0x2B12AB5D,
0xA9423C8C,0x9422153C,0xD3826FEC,0xEEE2465C,0x5CC29A4C,0x61A2B3FC,0x2602C92C,0x1B62E09C,
0xF9D2E0CF,0xC4B2C97F,0x8312B3AF,0xBE729A1F,0x0C52460F,0x31326FBF,0x7692156F,0x4BF23CDF,
0xC9A2AB0E,0xF4C282BE,0xB362F86E,0x8E02D1DE,0x3C220DCE,0x0142247E,0x46E25EAE,0x7B82771E,
0xB1E6B092,0x8C869922,0xCB26E3F2,0xF646CA42,0x44661652,0x79063FE2,0x3EA64532,0x03C66C82,
0x8196FB53,0xBCF6D2E3,0xFB56A833,0xC6368183,0x74165D93,0x49767423,0x0ED60EF3,0x33B62743,
0xD1062710,0xEC660EA0,0xABC67470,0x96A65DC0,0x248681D0,0x19E6A860,0x5E46D2B0,0x6326FB00,
0xE1766CD1,0xDC164561,0x9BB63FB1,0xA6D61601,0x14F6CA11,0x2996E3A1,0x6E369971,0x5356B0C1,
0x70279F96,0x4D47B626,0x0AE7CCF6,0x3787E546,0x85A73956,0xB8C710E6,0xFF676A36,0xC2074386,
0x4057D457,0x7D37FDE7,0x3A978737,0x07F7AE87,0xB5D77297,0x88B75B27,0xCF1721F7,0xF2770847,
0x10C70814,0x2DA721A4,0x6A075B74,0x576772C4,0xE547AED4,0xD8278764,0x9F87FDB4,0xA2E7D404,
0x20B743D5,0x1DD76A65,0x5A7710B5,0x67173905,0xD537E515,0xE857CCA5,0xAFF7B675,0x92979FC5,
0xE915E8DB,0xD475C16B,0x93D5BBBB,0xAEB5920B,0x1C954E1B,0x21F567AB,0x66551D7B,0x5B3534CB,
0xD965A31A,0xE4058AAA,0xA3A5F07A,0x9EC5D9CA,0x2CE505DA,0x11852C6A,0x562556BA,0x6B457F0A,
0x89F57F59,0xB49556E9,0xF3352C39,0xCE550589,0x7C75D999,0x4115F029,0x06B58AF9,0x3BD5A349,
0xB9853498,0x84E51D28,0xC34567F8,0xFE254E48,0x4C059258,0x7165BBE8,0x36C5C138,0x0BA5E888,
0x28D4C7DF,0x15B4EE6F,0x521494BF,0x6F74BD0F,0xDD54611F,0xE03448AF,0xA794327F,0x9AF41BCF,
0x18A48C1E,0x25C4A5AE,0x6264DF7E,0x5F04F6CE,0xED242ADE,0xD044036E,0x97E479BE,0xAA84500E,
0x4834505D,0x755479ED,0x32F4033D,0x0F942A8D,0xBDB4F69D,0x80D4DF2D,0xC774A5FD,0xFA148C4D,
0x78441B9C,0x4524322C,0x028448FC,0x3FE4614C,0x8DC4BD5C,0xB0A494EC,0xF704EE3C,0xCA64C78C,
},
{
0x00000000,0xCB5CD3A5,0x4DC8A10B,0x869472AE,0x9B914216,0x50CD91B3,0xD659E31D,0x1D0530B8,
0xEC53826D,0x270F51C8,0xA19B2366,0x6AC7F0C3,0x77C2C07B,0xBC9E13DE,0x3A0A6170,0xF156B2D5,
0x03D6029B,0xC88AD13E,0x4E1EA390,0x85427035,0x9847408D,0x531B9328,0xD58FE186,0x1ED33223,
0xEF8580F6,0x24D95353,0xA24D21FD,0x6911F258,0x7414C2E0,0xBF481145,0x39DC63EB,0xF280B04E,
0x07AC0536,0xCCF0D693,0x4A64A43D,0x81387798,0x9C3D4720,0x57619485,0xD1F5E62B,0x1AA9358E,
0xEBFF875B,0x20A354FE,0xA6372650,0x6D6BF5F5,0x706EC54D,0xBB3216E8,0x3DA66446,0xF6FAB7E3,
0x047A07AD,0xCF26D408,0x49B2A6A6,0x82EE7503,0x9FEB45BB,0x54B7961E,0xD223E4B0,0x197F3715,
0xE82985C0,0x23755665,0xA5E124CB,0x6EBDF76E,0x73B8C7D6,0xB8E41473,0x3E7066DD,0xF52CB578,
0x0F580A6C,0xC404D9C9,0x4290AB67,0x89CC78C2,0x94C9487A,0x5F959BDF,0xD901E971,0x125D3AD4,
0xE30B8801,0x28575BA4,0xAEC3290A,0x659FFAAF,0x789ACA17,0xB3C619B2,0x35526B1C,0xFE0EB8B9,
0x0C8E08F7,0xC7D2DB52,0x4146A9FC,0x8A1A7A59,0x971F4AE1,0x5C439944,0xDAD7EBEA,0x118B384F,
0xE0DD8A9A,0x2B81593F,0xAD152B91,0x6649F834,0x7B4CC88C,0xB0101B29,0x36846987,0xFDD8BA22,
0x08F40F5A,0xC3A8DCFF,0x453CAE51,0x8E607DF4,0x93654D4C,0x58399EE9,0xDEADEC47,0x15F13FE2,
0xE4A78D37,0x2FFB5E92,0xA96F2C3C,0x6233FF99,0x7F36CF21,0xB46A1C84,0x32FE6E2A,0xF9A2BD8F,
0x0B220DC1,0xC07EDE64,0x46EAACCA,0x8DB67F6F,0x90B34FD7,0x5BEF9C72,0xDD7BEEDC,0x16273D79,
0xE7718FAC,0x2C2D5C09,0xAAB92EA7,0x61E5FD02,0x7CE0CDBA,0xB7BC1E1F,0x31286CB1,0xFA74BF14,
0x1EB014D8,0xD5ECC77D,0x5378B5D3,0x98246676,0x852156CE,0x4E7D856B,0xC8E9F7C5,0x03B52460,
0xF2E396B5,0x39BF4510,0xBF2B37BE,0x7477E41B,0x6972D4A3,0xA22E0706,0x24BA75A8,0xEFE6A60D,
0x1D661643,0xD63AC5E6,0x50AEB748,0x9BF264ED,0x86F75455,0x4DAB87F0,0xCB3FF55E,0x006326FB,
0xF135942E,0x3A69478B,0xBCFD3525,0x77A1E680,0x6AA4D638,0xA1F8059D,0x276C7733,0xEC30A496,
0x191C11EE,0xD240C24B,0x54D4B0E5,0x9F886340,0x828D53F8,0x49D1805D,0xCF45F2F3,0x04192156,
0xF54F9383,0x3E134026,0xB8873288,0x73DBE12D,0x6EDED195,0xA5820230,0x2316709E,0xE84AA33B,
0x1ACA1375,0xD196C0D0,0x5702B27E,0x9C5E61DB,0x815B5163,0x4A0782C6,0xCC93F068,0x07CF23CD,
0xF6999118,0x3DC542BD,0xBB513013,0x700DE3B6,0x6D08D30E,0xA65400AB,0x20C07205,0xEB9CA1A0,
0x11E81EB4,0xDAB4CD11,0x5C20BFBF,0x977C6C1A,0x8A795CA2,0x41258F07,0xC7B1FDA9,0x0CED2E0C,
0xFDBB9CD9,0x36E74F7C,0xB0733DD2,0x7B2FEE77,0x662ADECF,0xAD760D6A,0x2BE27FC4,0xE0BEAC61,
0x123E1C2F,0xD962CF8A,0x5FF6BD24,0x94AA6E81,0x89AF5E39,0x42F38D9C,0xC467FF32,0x0F3B2C97,
0xFE6D9E42,0x35314DE7,0xB3A53F49,0x78F9ECEC,0x65FCDC54,0xAEA00FF1,0x28347D5F,0xE368AEFA,
0x16441B82,0xDD18C827,0x5B8CBA89,0x90D0692C,0x8DD55994,0x46898A31,0xC01DF89F,0x0B412B3A,
0xFA1799EF,0x314B4A4A,0xB7DF38E4,0x7C83EB41,0x6186DBF9,0xAADA085C,0x2C4E7AF2,0xE712A957,
0x15921919,0xDECECABC,0x585AB812,0x93066BB7,0x8E035B0F,0x455F88AA,0xC3CBFA04,0x089729A1,
0xF9C19B74,0x329D48D1,0xB4093A7F,0x7F55E9DA,0x6250D962,0xA90C0AC7,0x2F987869,0xE4C4ABCC,
},
{
0x00000000,0xA6770BB4,0x979F1129,0x31E81A9D,0xF44F2413,0x52382FA7,0x63D0353A,0xC5A73E8E,
0x33EF4E67,0x959845D3,0xA4705F4E,0x020754FA,0xC7A06A74,0x61D761C0,0x503F7B5D,0xF64870E9,
0x67DE9CCE,0xC1A9977A,0xF0418DE7,0x56368653,0x9391B8DD,0x35E6B369,0x040EA9F4,0xA279A240,
0x5431D2A9,0xF246D91D,0xC3AEC380,0x65D9C834,0xA07EF6BA,0x0609FD0E,0x37E1E793,0x9196EC27,
0xCFBD399C,0x69CA3228,0x582228B5,0xFE552301,0x3BF21D8F,0x9D85163B,0xAC6D0CA6,0x0A1A0712,
0xFC5277FB,0x5A257C4F,0x6BCD66D2,0xCDBA6D66,0x081D53E8,0xAE6A585C,0x9F8242C1,0x39F54975,
0xA863A552,0x0E14AEE6,0x3FFCB47B,0x998BBFCF,0x5C2C8141,0xFA5B8AF5,0xCBB39068,0x6DC49BDC,
0x9B8CEB35,0x3DFBE081,0x0C13FA1C,0xAA64F1A8,0x6FC3CF26,0xC9B4C492,0xF85CDE0F,0x5E2BD5BB,
0x440B7579,0xE27C7ECD,0xD3946450,0x75E36FE4,0xB044516A,0x16335ADE,0x27DB4043,0x81AC4BF7,
0x77E43B1E,0xD19330AA,0xE07B2A37,0x460C2183,0x83AB1F0D,0x25DC14B9,0x14340E24,0xB2430590,
0x23D5E9B7,0x85A2E203,0xB44AF89E,0x123DF32A,0xD79ACDA4,0x71EDC610,0x4005DC8D,0xE672D739,
0x103AA7D0,0xB64DAC64,0x87A5B6F9,0x21D2BD4D,0xE47583C3,0x42028877,0x73EA92EA,0xD59D995E,
0x8BB64CE5,0x2DC14751,0x1C295DCC,0xBA5E5678,0x7FF968F6,0xD98E6342,0xE86679DF,0x4E11726B,
0xB8590282,0x1E2E0936,0x2FC613AB,0x89B1181F,0x4C162691,0xEA612D25,0xDB8937B8,0x7DFE3C0C,
0xEC68D02B,0x4A1FDB9F,0x7BF7C102,0xDD80CAB6,0x1827F438,0xBE50FF8C,0x8FB8E511,0x29CFEEA5,
0xDF879E4C,0x79F095F8,0x48188F65,0xEE6F84D1,0x2BC8BA5F,0x8DBFB1EB,0xBC57AB76,0x1A20A0C2,
0x8816EAF2,0x2E61E146,0x1F89FBDB,0xB9FEF06F,0x7C59CEE1,0xDA2EC555,0xEBC6DFC8,0x4DB1D47C,
0xBBF9A495,0x1D8EAF21,0x2C66B5BC,0x8A11BE08,0x4FB68086,0xE9C18B32,0xD82991AF,0x7E5E9A1B,
0xEFC8763C,0x49BF7D88,0x78576715,0xDE206CA1,0x1B87522F,0xBDF0599B,0x8C184306,0x2A6F48B2,
0xDC27385B,0x7A5033EF,0x4BB82972,0xEDCF22C6,0x28681C48,0x8E1F17FC,0xBFF70D61,0x198006D5,
0x47ABD36E,0xE1DCD8DA,0xD034C247,0x7643C9F3,0xB3E4F77D,0x1593FCC9,0x247BE654,0x820CEDE0,
0x74449D09,0xD23396BD,0xE3DB8C20,0x45AC8794,0x800BB91A,0x267CB2AE,0x1794A833,0xB1E3A387,
0x20754FA0,0x86024414,0xB7EA5E89,0x119D553D,0xD43A6BB3,0x724D6007,0x43A57A9A,0xE5D2712E,
0x139A01C7,0xB5ED0A73,0x840510EE,0x22721B5A,0xE7D525D4,0x41A22E60,0x704A34FD,0xD63D3F49,
0xCC1D9F8B,0x6A6A943F,0x5B828EA2,0xFDF58516,0x3852BB98,0x9E25B02C,0xAFCDAAB1,0x09BAA105,
0xFFF2D1EC,0x5985DA58,0x686DC0C5,0xCE1ACB71,0x0BBDF5FF,0xADCAFE4B,0x9C22E4D6,0x3A55EF62,
0xABC30345,0x0DB408F1,0x3C5C126C,0x9A2B19D8,0x5F8C2756,0xF9FB2CE2,0xC813367F,0x6E643DCB,
0x982C4D22,0x3E5B4696,0x0FB35C0B,0xA9C457BF,0x6C636931,0xCA146285,0xFBFC7818,0x5D8B73AC,
0x03A0A617,0xA5D7ADA3,0x943FB73E,0x3248BC8A,0xF7EF8204,0x519889B0,0x6070932D,0xC6079899,
0x304FE870,0x9638E3C4,0xA7D0F959,0x01A7F2ED,0xC400CC63,0x6277C7D7,0x539FDD4A,0xF5E8D6FE,
0x647E3AD9,0xC209316D,0xF3E12BF0,0x55962044,0x90311ECA,0x3646157E,0x07AE0FE3,0xA1D90457,
0x579174BE,0xF1E67F0A,0xC00E6597,0x66796E23,0xA3DE50AD,0x05A95B19,0x34414184,0x92364A30,
},
{
0x00000000,0xCCAA009E,0x4225077D,0x8E8F07E3,0x844A0EFA,0x48E00E64,0xC66F0987,0x0AC50919,
0xD3E51BB5,0x1F4F1B2B,0x91C01CC8,0x5D6A1C56,0x57AF154F,0x9B0515D1,0x158A1232,0xD92012AC,
0x7CBB312B,0xB01131B5,0x3E9E3656,0xF23436C8,0xF8F13FD1,0x345B3F4F,0xBAD438AC,0x767E3832,
0xAF5E2A9E,0x63F42A00,0xED7B2DE3,0x21D12D7D,0x2B142464,0xE7BE24FA,0x69312319,0xA59B2387,
0xF9766256,0x35DC62C8,0xBB53652B,0x77F965B5,0x7D3C6CAC,0xB1966C32,0x3F196BD1,0xF3B36B4F,
0x2A9379E3,0xE639797D,0x68B67E9E,0xA41C7E00,0xAED97719,0x62737787,0xECFC7064,0x205670FA,
0x85CD537D,0x496753E3,0xC7E85400,0x0B42549E,0x01875D87,0xCD2D5D19,0x43A25AFA,0x8F085A64,
0x562848C8,0x9A824856,0x140D4FB5,0xD8A74F2B,0xD2624632,0x1EC846AC,0x9047414F,0x5CED41D1,
0x299DC2ED,0xE537C273,0x6BB8C590,0xA712C50E,0xADD7CC17,0x617DCC89,0xEFF2CB6A,0x2358CBF4,
0xFA78D958,0x36D2D9C6,0xB85DDE25,0x74F7DEBB,0x7E32D7A2,0xB298D73C,0x3C17D0DF,0xF0BDD041,
0x5526F3C6,0x998CF358,0x1703F4BB,0xDBA9F425,0xD16CFD3C,0x1DC6FDA2,0x9349FA41,0x5FE3FADF,
0x86C3E873,0x4A69E8ED,0xC4E6EF0E,0x084CEF90,0x0289E689,0xCE23E617,0x40ACE1F4,0x8C06E16A,
0xD0EBA0BB,0x1C41A025,0x92CEA7C6,0x5E64A758,0x54A1AE41,0x980BAEDF,0x1684A93C,0xDA2EA9A2,
0x030EBB0E,0xCFA4BB90,0x412BBC73,0x8D81BCED,0x8744B5F4,0x4BEEB56A,0xC561B289,0x09CBB217,
0xAC509190,0x60FA910E,0xEE7596ED,0x22DF9673,0x281A9F6A,0xE4B09FF4,0x6A3F9817,0xA6959889,
0x7FB58A25,0xB31F8ABB,0x3D908D58,0xF13A8DC6,0xFBFF84DF,0x37558441,0xB9DA83A2,0x7570833C,
0x533B85DA,0x9F918544,0x111E82A7,0xDDB48239,0xD7718B20,0x1BDB8BBE,0x95548C5D,0x59FE8CC3,
0x80DE9E6F,0x4C749EF1,0xC2FB9912,0x0E51998C,0x04949095,0xC83E900B,0x46B197E8,0x8A1B9776,
0x2F80B4F1,0xE32AB46F,0x6DA5B38C,0xA10FB312,0xABCABA0B,0x6760BA95,0xE9EFBD76,0x2545BDE8,
0xFC65AF44,0x30CFAFDA,0xBE40A839,0x72EAA8A7,0x782FA1BE,0xB485A120,0x3A0AA6C3,0xF6A0A65D,
0xAA4DE78C,0x66E7E712,0xE868E0F1,0x24C2E06F,0x2E07E976,0xE2ADE9E8,0x6C22EE0B,0xA088EE95,
0x79A8FC39,0xB502FCA7,0x3B8DFB44,0xF727FBDA,0xFDE2F2C3,0x3148F25D,0xBFC7F5BE,0x736DF520,
0xD6F6D6A7,0x1A5CD639,0x94D3D1DA,0x5879D144,0x52BCD85D,0x9E16D8C3,0x1099DF20,0xDC33DFBE,
0x0513CD12,0xC9B9CD8C,0x4736CA6F,0x8B9CCAF1,0x8159C3E8,0x4DF3C376,0xC37CC495,0x0FD6C40B,
0x7AA64737,0xB60C47A9,0x3883404A,0xF42940D4,0xFEEC49CD,0x32464953,0xBCC94EB0,0x70634E2E,
0xA9435C82,0x65E95C1C,0xEB665BFF,0x27CC5B61,0x2D095278,0xE1A352E6,0x6F2C5505,0xA386559B,
0x061D761C,0xCAB77682,0x44387161,0x889271FF,0x825778E6,0x4EFD7878,0xC0727F9B,0x0CD87F05,
0xD5F86DA9,0x19526D37,0x97DD6AD4,0x5B776A4A,0x51B26353,0x9D1863CD,0x1397642E,0xDF3D64B0,
0x83D02561,0x4F7A25FF,0xC1F5221C,0x0D5F2282,0x079A2B9B,0xCB302B05,0x45BF2CE6,0x89152C78,
0x50353ED4,0x9C9F3E4A,0x121039A9,0xDEBA3937,0xD47F302E,0x18D530B0,0x965A3753,0x5AF037CD,
0xFF6B144A,0x33C114D4,0xBD4E1337,0x71E413A9,0x7B211AB0,0xB78B1A2E,0x39041DCD,0xF5AE1D53,
0x2C8E0FFF,0xE0240F61,0x6EAB0882,0xA201081C,0xA8C40105,0x646E019B,0xEAE10678,0x264B06E6,
}
#endif // CRC32_USE_LOOKUP_TABLE_SLICING_BY_8 || CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
#ifdef CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
// beyond this point only relevant for Slicing-by-16
,{
0x00000000,0x177B1443,0x2EF62886,0x398D3CC5,0x5DEC510C,0x4A97454F,0x731A798A,0x64616DC9,
0xBBD8A218,0xACA3B65B,0x952E8A9E,0x82559EDD,0xE634F314,0xF14FE757,0xC8C2DB92,0xDFB9CFD1,
0xACC04271,0xBBBB5632,0x82366AF7,0x954D7EB4,0xF12C137D,0xE657073E,0xDFDA3BFB,0xC8A12FB8,
0x1718E069,0x0063F42A,0x39EEC8EF,0x2E95DCAC,0x4AF4B165,0x5D8FA526,0x640299E3,0x73798DA0,
0x82F182A3,0x958A96E0,0xAC07AA25,0xBB7CBE66,0xDF1DD3AF,0xC866C7EC,0xF1EBFB29,0xE690EF6A,
0x392920BB,0x2E5234F8,0x17DF083D,0x00A41C7E,0x64C571B7,0x73BE65F4,0x4A335931,0x5D484D72,
0x2E31C0D2,0x394AD491,0x00C7E854,0x17BCFC17,0x73DD91DE,0x64A6859D,0x5D2BB958,0x4A50AD1B,
0x95E962CA,0x82927689,0xBB1F4A4C,0xAC645E0F,0xC80533C6,0xDF7E2785,0xE6F31B40,0xF1880F03,
0xDE920307,0xC9E91744,0xF0642B81,0xE71F3FC2,0x837E520B,0x94054648,0xAD887A8D,0xBAF36ECE,
0x654AA11F,0x7231B55C,0x4BBC8999,0x5CC79DDA,0x38A6F013,0x2FDDE450,0x1650D895,0x012BCCD6,
0x72524176,0x65295535,0x5CA469F0,0x4BDF7DB3,0x2FBE107A,0x38C50439,0x014838FC,0x16332CBF,
0xC98AE36E,0xDEF1F72D,0xE77CCBE8,0xF007DFAB,0x9466B262,0x831DA621,0xBA909AE4,0xADEB8EA7,
0x5C6381A4,0x4B1895E7,0x7295A922,0x65EEBD61,0x018FD0A8,0x16F4C4EB,0x2F79F82E,0x3802EC6D,
0xE7BB23BC,0xF0C037FF,0xC94D0B3A,0xDE361F79,0xBA5772B0,0xAD2C66F3,0x94A15A36,0x83DA4E75,
0xF0A3C3D5,0xE7D8D796,0xDE55EB53,0xC92EFF10,0xAD4F92D9,0xBA34869A,0x83B9BA5F,0x94C2AE1C,
0x4B7B61CD,0x5C00758E,0x658D494B,0x72F65D08,0x169730C1,0x01EC2482,0x38611847,0x2F1A0C04,
0x6655004F,0x712E140C,0x48A328C9,0x5FD83C8A,0x3BB95143,0x2CC24500,0x154F79C5,0x02346D86,
0xDD8DA257,0xCAF6B614,0xF37B8AD1,0xE4009E92,0x8061F35B,0x971AE718,0xAE97DBDD,0xB9ECCF9E,
0xCA95423E,0xDDEE567D,0xE4636AB8,0xF3187EFB,0x97791332,0x80020771,0xB98F3BB4,0xAEF42FF7,
0x714DE026,0x6636F465,0x5FBBC8A0,0x48C0DCE3,0x2CA1B12A,0x3BDAA569,0x025799AC,0x152C8DEF,
0xE4A482EC,0xF3DF96AF,0xCA52AA6A,0xDD29BE29,0xB948D3E0,0xAE33C7A3,0x97BEFB66,0x80C5EF25,
0x5F7C20F4,0x480734B7,0x718A0872,0x66F11C31,0x029071F8,0x15EB65BB,0x2C66597E,0x3B1D4D3D,
0x4864C09D,0x5F1FD4DE,0x6692E81B,0x71E9FC58,0x15889191,0x02F385D2,0x3B7EB917,0x2C05AD54,
0xF3BC6285,0xE4C776C6,0xDD4A4A03,0xCA315E40,0xAE503389,0xB92B27CA,0x80A61B0F,0x97DD0F4C,
0xB8C70348,0xAFBC170B,0x96312BCE,0x814A3F8D,0xE52B5244,0xF2504607,0xCBDD7AC2,0xDCA66E81,
0x031FA150,0x1464B513,0x2DE989D6,0x3A929D95,0x5EF3F05C,0x4988E41F,0x7005D8DA,0x677ECC99,
0x14074139,0x037C557A,0x3AF169BF,0x2D8A7DFC,0x49EB1035,0x5E900476,0x671D38B3,0x70662CF0,
0xAFDFE321,0xB8A4F762,0x8129CBA7,0x9652DFE4,0xF233B22D,0xE548A66E,0xDCC59AAB,0xCBBE8EE8,
0x3A3681EB,0x2D4D95A8,0x14C0A96D,0x03BBBD2E,0x67DAD0E7,0x70A1C4A4,0x492CF861,0x5E57EC22,
0x81EE23F3,0x969537B0,0xAF180B75,0xB8631F36,0xDC0272FF,0xCB7966BC,0xF2F45A79,0xE58F4E3A,
0x96F6C39A,0x818DD7D9,0xB800EB1C,0xAF7BFF5F,0xCB1A9296,0xDC6186D5,0xE5ECBA10,0xF297AE53,
0x2D2E6182,0x3A5575C1,0x03D84904,0x14A35D47,0x70C2308E,0x67B924CD,0x5E341808,0x494F0C4B,
},
{
0x00000000,0xEFC26B3E,0x04F5D03D,0xEB37BB03,0x09EBA07A,0xE629CB44,0x0D1E7047,0xE2DC1B79,
0x13D740F4,0xFC152BCA,0x172290C9,0xF8E0FBF7,0x1A3CE08E,0xF5FE8BB0,0x1EC930B3,0xF10B5B8D,
0x27AE81E8,0xC86CEAD6,0x235B51D5,0xCC993AEB,0x2E452192,0xC1874AAC,0x2AB0F1AF,0xC5729A91,
0x3479C11C,0xDBBBAA22,0x308C1121,0xDF4E7A1F,0x3D926166,0xD2500A58,0x3967B15B,0xD6A5DA65,
0x4F5D03D0,0xA09F68EE,0x4BA8D3ED,0xA46AB8D3,0x46B6A3AA,0xA974C894,0x42437397,0xAD8118A9,
0x5C8A4324,0xB348281A,0x587F9319,0xB7BDF827,0x5561E35E,0xBAA38860,0x51943363,0xBE56585D,
0x68F38238,0x8731E906,0x6C065205,0x83C4393B,0x61182242,0x8EDA497C,0x65EDF27F,0x8A2F9941,
0x7B24C2CC,0x94E6A9F2,0x7FD112F1,0x901379CF,0x72CF62B6,0x9D0D0988,0x763AB28B,0x99F8D9B5,
0x9EBA07A0,0x71786C9E,0x9A4FD79D,0x758DBCA3,0x9751A7DA,0x7893CCE4,0x93A477E7,0x7C661CD9,
0x8D6D4754,0x62AF2C6A,0x89989769,0x665AFC57,0x8486E72E,0x6B448C10,0x80733713,0x6FB15C2D,
0xB9148648,0x56D6ED76,0xBDE15675,0x52233D4B,0xB0FF2632,0x5F3D4D0C,0xB40AF60F,0x5BC89D31,
0xAAC3C6BC,0x4501AD82,0xAE361681,0x41F47DBF,0xA32866C6,0x4CEA0DF8,0xA7DDB6FB,0x481FDDC5,
0xD1E70470,0x3E256F4E,0xD512D44D,0x3AD0BF73,0xD80CA40A,0x37CECF34,0xDCF97437,0x333B1F09,
0xC2304484,0x2DF22FBA,0xC6C594B9,0x2907FF87,0xCBDBE4FE,0x24198FC0,0xCF2E34C3,0x20EC5FFD,
0xF6498598,0x198BEEA6,0xF2BC55A5,0x1D7E3E9B,0xFFA225E2,0x10604EDC,0xFB57F5DF,0x14959EE1,
0xE59EC56C,0x0A5CAE52,0xE16B1551,0x0EA97E6F,0xEC756516,0x03B70E28,0xE880B52B,0x0742DE15,
0xE6050901,0x09C7623F,0xE2F0D93C,0x0D32B202,0xEFEEA97B,0x002CC245,0xEB1B7946,0x04D91278,
0xF5D249F5,0x1A1022CB,0xF12799C8,0x1EE5F2F6,0xFC39E98F,0x13FB82B1,0xF8CC39B2,0x170E528C,
0xC1AB88E9,0x2E69E3D7,0xC55E58D4,0x2A9C33EA,0xC8402893,0x278243AD,0xCCB5F8AE,0x23779390,
0xD27CC81D,0x3DBEA323,0xD6891820,0x394B731E,0xDB976867,0x34550359,0xDF62B85A,0x30A0D364,
0xA9580AD1,0x469A61EF,0xADADDAEC,0x426FB1D2,0xA0B3AAAB,0x4F71C195,0xA4467A96,0x4B8411A8,
0xBA8F4A25,0x554D211B,0xBE7A9A18,0x51B8F126,0xB364EA5F,0x5CA68161,0xB7913A62,0x5853515C,
0x8EF68B39,0x6134E007,0x8A035B04,0x65C1303A,0x871D2B43,0x68DF407D,0x83E8FB7E,0x6C2A9040,
0x9D21CBCD,0x72E3A0F3,0x99D41BF0,0x761670CE,0x94CA6BB7,0x7B080089,0x903FBB8A,0x7FFDD0B4,
0x78BF0EA1,0x977D659F,0x7C4ADE9C,0x9388B5A2,0x7154AEDB,0x9E96C5E5,0x75A17EE6,0x9A6315D8,
0x6B684E55,0x84AA256B,0x6F9D9E68,0x805FF556,0x6283EE2F,0x8D418511,0x66763E12,0x89B4552C,
0x5F118F49,0xB0D3E477,0x5BE45F74,0xB426344A,0x56FA2F33,0xB938440D,0x520FFF0E,0xBDCD9430,
0x4CC6CFBD,0xA304A483,0x48331F80,0xA7F174BE,0x452D6FC7,0xAAEF04F9,0x41D8BFFA,0xAE1AD4C4,
0x37E20D71,0xD820664F,0x3317DD4C,0xDCD5B672,0x3E09AD0B,0xD1CBC635,0x3AFC7D36,0xD53E1608,
0x24354D85,0xCBF726BB,0x20C09DB8,0xCF02F686,0x2DDEEDFF,0xC21C86C1,0x292B3DC2,0xC6E956FC,
0x104C8C99,0xFF8EE7A7,0x14B95CA4,0xFB7B379A,0x19A72CE3,0xF66547DD,0x1D52FCDE,0xF29097E0,
0x039BCC6D,0xEC59A753,0x076E1C50,0xE8AC776E,0x0A706C17,0xE5B20729,0x0E85BC2A,0xE147D714,
},
{
0x00000000,0xC18EDFC0,0x586CB9C1,0x99E26601,0xB0D97382,0x7157AC42,0xE8B5CA43,0x293B1583,
0xBAC3E145,0x7B4D3E85,0xE2AF5884,0x23218744,0x0A1A92C7,0xCB944D07,0x52762B06,0x93F8F4C6,
0xAEF6C4CB,0x6F781B0B,0xF69A7D0A,0x3714A2CA,0x1E2FB749,0xDFA16889,0x46430E88,0x87CDD148,
0x1435258E,0xD5BBFA4E,0x4C599C4F,0x8DD7438F,0xA4EC560C,0x656289CC,0xFC80EFCD,0x3D0E300D,
0x869C8FD7,0x47125017,0xDEF03616,0x1F7EE9D6,0x3645FC55,0xF7CB2395,0x6E294594,0xAFA79A54,
0x3C5F6E92,0xFDD1B152,0x6433D753,0xA5BD0893,0x8C861D10,0x4D08C2D0,0xD4EAA4D1,0x15647B11,
0x286A4B1C,0xE9E494DC,0x7006F2DD,0xB1882D1D,0x98B3389E,0x593DE75E,0xC0DF815F,0x01515E9F,
0x92A9AA59,0x53277599,0xCAC51398,0x0B4BCC58,0x2270D9DB,0xE3FE061B,0x7A1C601A,0xBB92BFDA,
0xD64819EF,0x17C6C62F,0x8E24A02E,0x4FAA7FEE,0x66916A6D,0xA71FB5AD,0x3EFDD3AC,0xFF730C6C,
0x6C8BF8AA,0xAD05276A,0x34E7416B,0xF5699EAB,0xDC528B28,0x1DDC54E8,0x843E32E9,0x45B0ED29,
0x78BEDD24,0xB93002E4,0x20D264E5,0xE15CBB25,0xC867AEA6,0x09E97166,0x900B1767,0x5185C8A7,
0xC27D3C61,0x03F3E3A1,0x9A1185A0,0x5B9F5A60,0x72A44FE3,0xB32A9023,0x2AC8F622,0xEB4629E2,
0x50D49638,0x915A49F8,0x08B82FF9,0xC936F039,0xE00DE5BA,0x21833A7A,0xB8615C7B,0x79EF83BB,
0xEA17777D,0x2B99A8BD,0xB27BCEBC,0x73F5117C,0x5ACE04FF,0x9B40DB3F,0x02A2BD3E,0xC32C62FE,
0xFE2252F3,0x3FAC8D33,0xA64EEB32,0x67C034F2,0x4EFB2171,0x8F75FEB1,0x169798B0,0xD7194770,
0x44E1B3B6,0x856F6C76,0x1C8D0A77,0xDD03D5B7,0xF438C034,0x35B61FF4,0xAC5479F5,0x6DDAA635,
0x77E1359F,0xB66FEA5F,0x2F8D8C5E,0xEE03539E,0xC738461D,0x06B699DD,0x9F54FFDC,0x5EDA201C,
0xCD22D4DA,0x0CAC0B1A,0x954E6D1B,0x54C0B2DB,0x7DFBA758,0xBC757898,0x25971E99,0xE419C159,
0xD917F154,0x18992E94,0x817B4895,0x40F59755,0x69CE82D6,0xA8405D16,0x31A23B17,0xF02CE4D7,
0x63D41011,0xA25ACFD1,0x3BB8A9D0,0xFA367610,0xD30D6393,0x1283BC53,0x8B61DA52,0x4AEF0592,
0xF17DBA48,0x30F36588,0xA9110389,0x689FDC49,0x41A4C9CA,0x802A160A,0x19C8700B,0xD846AFCB,
0x4BBE5B0D,0x8A3084CD,0x13D2E2CC,0xD25C3D0C,0xFB67288F,0x3AE9F74F,0xA30B914E,0x62854E8E,
0x5F8B7E83,0x9E05A143,0x07E7C742,0xC6691882,0xEF520D01,0x2EDCD2C1,0xB73EB4C0,0x76B06B00,
0xE5489FC6,0x24C64006,0xBD242607,0x7CAAF9C7,0x5591EC44,0x941F3384,0x0DFD5585,0xCC738A45,
0xA1A92C70,0x6027F3B0,0xF9C595B1,0x384B4A71,0x11705FF2,0xD0FE8032,0x491CE633,0x889239F3,
0x1B6ACD35,0xDAE412F5,0x430674F4,0x8288AB34,0xABB3BEB7,0x6A3D6177,0xF3DF0776,0x3251D8B6,
0x0F5FE8BB,0xCED1377B,0x5733517A,0x96BD8EBA,0xBF869B39,0x7E0844F9,0xE7EA22F8,0x2664FD38,
0xB59C09FE,0x7412D63E,0xEDF0B03F,0x2C7E6FFF,0x05457A7C,0xC4CBA5BC,0x5D29C3BD,0x9CA71C7D,
0x2735A3A7,0xE6BB7C67,0x7F591A66,0xBED7C5A6,0x97ECD025,0x56620FE5,0xCF8069E4,0x0E0EB624,
0x9DF642E2,0x5C789D22,0xC59AFB23,0x041424E3,0x2D2F3160,0xECA1EEA0,0x754388A1,0xB4CD5761,
0x89C3676C,0x484DB8AC,0xD1AFDEAD,0x1021016D,0x391A14EE,0xF894CB2E,0x6176AD2F,0xA0F872EF,
0x33008629,0xF28E59E9,0x6B6C3FE8,0xAAE2E028,0x83D9F5AB,0x42572A6B,0xDBB54C6A,0x1A3B93AA,
},
{
0x00000000,0x9BA54C6F,0xEC3B9E9F,0x779ED2F0,0x03063B7F,0x98A37710,0xEF3DA5E0,0x7498E98F,
0x060C76FE,0x9DA93A91,0xEA37E861,0x7192A40E,0x050A4D81,0x9EAF01EE,0xE931D31E,0x72949F71,
0x0C18EDFC,0x97BDA193,0xE0237363,0x7B863F0C,0x0F1ED683,0x94BB9AEC,0xE325481C,0x78800473,
0x0A149B02,0x91B1D76D,0xE62F059D,0x7D8A49F2,0x0912A07D,0x92B7EC12,0xE5293EE2,0x7E8C728D,
0x1831DBF8,0x83949797,0xF40A4567,0x6FAF0908,0x1B37E087,0x8092ACE8,0xF70C7E18,0x6CA93277,
0x1E3DAD06,0x8598E169,0xF2063399,0x69A37FF6,0x1D3B9679,0x869EDA16,0xF10008E6,0x6AA54489,
0x14293604,0x8F8C7A6B,0xF812A89B,0x63B7E4F4,0x172F0D7B,0x8C8A4114,0xFB1493E4,0x60B1DF8B,
0x122540FA,0x89800C95,0xFE1EDE65,0x65BB920A,0x11237B85,0x8A8637EA,0xFD18E51A,0x66BDA975,
0x3063B7F0,0xABC6FB9F,0xDC58296F,0x47FD6500,0x33658C8F,0xA8C0C0E0,0xDF5E1210,0x44FB5E7F,
0x366FC10E,0xADCA8D61,0xDA545F91,0x41F113FE,0x3569FA71,0xAECCB61E,0xD95264EE,0x42F72881,
0x3C7B5A0C,0xA7DE1663,0xD040C493,0x4BE588FC,0x3F7D6173,0xA4D82D1C,0xD346FFEC,0x48E3B383,
0x3A772CF2,0xA1D2609D,0xD64CB26D,0x4DE9FE02,0x3971178D,0xA2D45BE2,0xD54A8912,0x4EEFC57D,
0x28526C08,0xB3F72067,0xC469F297,0x5FCCBEF8,0x2B545777,0xB0F11B18,0xC76FC9E8,0x5CCA8587,
0x2E5E1AF6,0xB5FB5699,0xC2658469,0x59C0C806,0x2D582189,0xB6FD6DE6,0xC163BF16,0x5AC6F379,
0x244A81F4,0xBFEFCD9B,0xC8711F6B,0x53D45304,0x274CBA8B,0xBCE9F6E4,0xCB772414,0x50D2687B,
0x2246F70A,0xB9E3BB65,0xCE7D6995,0x55D825FA,0x2140CC75,0xBAE5801A,0xCD7B52EA,0x56DE1E85,
0x60C76FE0,0xFB62238F,0x8CFCF17F,0x1759BD10,0x63C1549F,0xF86418F0,0x8FFACA00,0x145F866F,
0x66CB191E,0xFD6E5571,0x8AF08781,0x1155CBEE,0x65CD2261,0xFE686E0E,0x89F6BCFE,0x1253F091,
0x6CDF821C,0xF77ACE73,0x80E41C83,0x1B4150EC,0x6FD9B963,0xF47CF50C,0x83E227FC,0x18476B93,
0x6AD3F4E2,0xF176B88D,0x86E86A7D,0x1D4D2612,0x69D5CF9D,0xF27083F2,0x85EE5102,0x1E4B1D6D,
0x78F6B418,0xE353F877,0x94CD2A87,0x0F6866E8,0x7BF08F67,0xE055C308,0x97CB11F8,0x0C6E5D97,
0x7EFAC2E6,0xE55F8E89,0x92C15C79,0x09641016,0x7DFCF999,0xE659B5F6,0x91C76706,0x0A622B69,
0x74EE59E4,0xEF4B158B,0x98D5C77B,0x03708B14,0x77E8629B,0xEC4D2EF4,0x9BD3FC04,0x0076B06B,
0x72E22F1A,0xE9476375,0x9ED9B185,0x057CFDEA,0x71E41465,0xEA41580A,0x9DDF8AFA,0x067AC695,
0x50A4D810,0xCB01947F,0xBC9F468F,0x273A0AE0,0x53A2E36F,0xC807AF00,0xBF997DF0,0x243C319F,
0x56A8AEEE,0xCD0DE281,0xBA933071,0x21367C1E,0x55AE9591,0xCE0BD9FE,0xB9950B0E,0x22304761,
0x5CBC35EC,0xC7197983,0xB087AB73,0x2B22E71C,0x5FBA0E93,0xC41F42FC,0xB381900C,0x2824DC63,
0x5AB04312,0xC1150F7D,0xB68BDD8D,0x2D2E91E2,0x59B6786D,0xC2133402,0xB58DE6F2,0x2E28AA9D,
0x489503E8,0xD3304F87,0xA4AE9D77,0x3F0BD118,0x4B933897,0xD03674F8,0xA7A8A608,0x3C0DEA67,
0x4E997516,0xD53C3979,0xA2A2EB89,0x3907A7E6,0x4D9F4E69,0xD63A0206,0xA1A4D0F6,0x3A019C99,
0x448DEE14,0xDF28A27B,0xA8B6708B,0x33133CE4,0x478BD56B,0xDC2E9904,0xABB04BF4,0x3015079B,
0x428198EA,0xD924D485,0xAEBA0675,0x351F4A1A,0x4187A395,0xDA22EFFA,0xADBC3D0A,0x36197165,
},
{
0x00000000,0xDD96D985,0x605CB54B,0xBDCA6CCE,0xC0B96A96,0x1D2FB313,0xA0E5DFDD,0x7D730658,
0x5A03D36D,0x87950AE8,0x3A5F6626,0xE7C9BFA3,0x9ABAB9FB,0x472C607E,0xFAE60CB0,0x2770D535,
0xB407A6DA,0x69917F5F,0xD45B1391,0x09CDCA14,0x74BECC4C,0xA92815C9,0x14E27907,0xC974A082,
0xEE0475B7,0x3392AC32,0x8E58C0FC,0x53CE1979,0x2EBD1F21,0xF32BC6A4,0x4EE1AA6A,0x937773EF,
0xB37E4BF5,0x6EE89270,0xD322FEBE,0x0EB4273B,0x73C72163,0xAE51F8E6,0x139B9428,0xCE0D4DAD,
0xE97D9898,0x34EB411D,0x89212DD3,0x54B7F456,0x29C4F20E,0xF4522B8B,0x49984745,0x940E9EC0,
0x0779ED2F,0xDAEF34AA,0x67255864,0xBAB381E1,0xC7C087B9,0x1A565E3C,0xA79C32F2,0x7A0AEB77,
0x5D7A3E42,0x80ECE7C7,0x3D268B09,0xE0B0528C,0x9DC354D4,0x40558D51,0xFD9FE19F,0x2009381A,
0xBD8D91AB,0x601B482E,0xDDD124E0,0x0047FD65,0x7D34FB3D,0xA0A222B8,0x1D684E76,0xC0FE97F3,
0xE78E42C6,0x3A189B43,0x87D2F78D,0x5A442E08,0x27372850,0xFAA1F1D5,0x476B9D1B,0x9AFD449E,
0x098A3771,0xD41CEEF4,0x69D6823A,0xB4405BBF,0xC9335DE7,0x14A58462,0xA96FE8AC,0x74F93129,
0x5389E41C,0x8E1F3D99,0x33D55157,0xEE4388D2,0x93308E8A,0x4EA6570F,0xF36C3BC1,0x2EFAE244,
0x0EF3DA5E,0xD36503DB,0x6EAF6F15,0xB339B690,0xCE4AB0C8,0x13DC694D,0xAE160583,0x7380DC06,
0x54F00933,0x8966D0B6,0x34ACBC78,0xE93A65FD,0x944963A5,0x49DFBA20,0xF415D6EE,0x29830F6B,
0xBAF47C84,0x6762A501,0xDAA8C9CF,0x073E104A,0x7A4D1612,0xA7DBCF97,0x1A11A359,0xC7877ADC,
0xE0F7AFE9,0x3D61766C,0x80AB1AA2,0x5D3DC327,0x204EC57F,0xFDD81CFA,0x40127034,0x9D84A9B1,
0xA06A2517,0x7DFCFC92,0xC036905C,0x1DA049D9,0x60D34F81,0xBD459604,0x008FFACA,0xDD19234F,
0xFA69F67A,0x27FF2FFF,0x9A354331,0x47A39AB4,0x3AD09CEC,0xE7464569,0x5A8C29A7,0x871AF022,
0x146D83CD,0xC9FB5A48,0x74313686,0xA9A7EF03,0xD4D4E95B,0x094230DE,0xB4885C10,0x691E8595,
0x4E6E50A0,0x93F88925,0x2E32E5EB,0xF3A43C6E,0x8ED73A36,0x5341E3B3,0xEE8B8F7D,0x331D56F8,
0x13146EE2,0xCE82B767,0x7348DBA9,0xAEDE022C,0xD3AD0474,0x0E3BDDF1,0xB3F1B13F,0x6E6768BA,
0x4917BD8F,0x9481640A,0x294B08C4,0xF4DDD141,0x89AED719,0x54380E9C,0xE9F26252,0x3464BBD7,
0xA713C838,0x7A8511BD,0xC74F7D73,0x1AD9A4F6,0x67AAA2AE,0xBA3C7B2B,0x07F617E5,0xDA60CE60,
0xFD101B55,0x2086C2D0,0x9D4CAE1E,0x40DA779B,0x3DA971C3,0xE03FA846,0x5DF5C488,0x80631D0D,
0x1DE7B4BC,0xC0716D39,0x7DBB01F7,0xA02DD872,0xDD5EDE2A,0x00C807AF,0xBD026B61,0x6094B2E4,
0x47E467D1,0x9A72BE54,0x27B8D29A,0xFA2E0B1F,0x875D0D47,0x5ACBD4C2,0xE701B80C,0x3A976189,
0xA9E01266,0x7476CBE3,0xC9BCA72D,0x142A7EA8,0x695978F0,0xB4CFA175,0x0905CDBB,0xD493143E,
0xF3E3C10B,0x2E75188E,0x93BF7440,0x4E29ADC5,0x335AAB9D,0xEECC7218,0x53061ED6,0x8E90C753,
0xAE99FF49,0x730F26CC,0xCEC54A02,0x13539387,0x6E2095DF,0xB3B64C5A,0x0E7C2094,0xD3EAF911,
0xF49A2C24,0x290CF5A1,0x94C6996F,0x495040EA,0x342346B2,0xE9B59F37,0x547FF3F9,0x89E92A7C,
0x1A9E5993,0xC7088016,0x7AC2ECD8,0xA754355D,0xDA273305,0x07B1EA80,0xBA7B864E,0x67ED5FCB,
0x409D8AFE,0x9D0B537B,0x20C13FB5,0xFD57E630,0x8024E068,0x5DB239ED,0xE0785523,0x3DEE8CA6,
},
{
0x00000000,0x9D0FE176,0xE16EC4AD,0x7C6125DB,0x19AC8F1B,0x84A36E6D,0xF8C24BB6,0x65CDAAC0,
0x33591E36,0xAE56FF40,0xD237DA9B,0x4F383BED,0x2AF5912D,0xB7FA705B,0xCB9B5580,0x5694B4F6,
0x66B23C6C,0xFBBDDD1A,0x87DCF8C1,0x1AD319B7,0x7F1EB377,0xE2115201,0x9E7077DA,0x037F96AC,
0x55EB225A,0xC8E4C32C,0xB485E6F7,0x298A0781,0x4C47AD41,0xD1484C37,0xAD2969EC,0x3026889A,
0xCD6478D8,0x506B99AE,0x2C0ABC75,0xB1055D03,0xD4C8F7C3,0x49C716B5,0x35A6336E,0xA8A9D218,
0xFE3D66EE,0x63328798,0x1F53A243,0x825C4335,0xE791E9F5,0x7A9E0883,0x06FF2D58,0x9BF0CC2E,
0xABD644B4,0x36D9A5C2,0x4AB88019,0xD7B7616F,0xB27ACBAF,0x2F752AD9,0x53140F02,0xCE1BEE74,
0x988F5A82,0x0580BBF4,0x79E19E2F,0xE4EE7F59,0x8123D599,0x1C2C34EF,0x604D1134,0xFD42F042,
0x41B9F7F1,0xDCB61687,0xA0D7335C,0x3DD8D22A,0x581578EA,0xC51A999C,0xB97BBC47,0x24745D31,
0x72E0E9C7,0xEFEF08B1,0x938E2D6A,0x0E81CC1C,0x6B4C66DC,0xF64387AA,0x8A22A271,0x172D4307,
0x270BCB9D,0xBA042AEB,0xC6650F30,0x5B6AEE46,0x3EA74486,0xA3A8A5F0,0xDFC9802B,0x42C6615D,
0x1452D5AB,0x895D34DD,0xF53C1106,0x6833F070,0x0DFE5AB0,0x90F1BBC6,0xEC909E1D,0x719F7F6B,
0x8CDD8F29,0x11D26E5F,0x6DB34B84,0xF0BCAAF2,0x95710032,0x087EE144,0x741FC49F,0xE91025E9,
0xBF84911F,0x228B7069,0x5EEA55B2,0xC3E5B4C4,0xA6281E04,0x3B27FF72,0x4746DAA9,0xDA493BDF,
0xEA6FB345,0x77605233,0x0B0177E8,0x960E969E,0xF3C33C5E,0x6ECCDD28,0x12ADF8F3,0x8FA21985,
0xD936AD73,0x44394C05,0x385869DE,0xA55788A8,0xC09A2268,0x5D95C31E,0x21F4E6C5,0xBCFB07B3,
0x8373EFE2,0x1E7C0E94,0x621D2B4F,0xFF12CA39,0x9ADF60F9,0x07D0818F,0x7BB1A454,0xE6BE4522,
0xB02AF1D4,0x2D2510A2,0x51443579,0xCC4BD40F,0xA9867ECF,0x34899FB9,0x48E8BA62,0xD5E75B14,
0xE5C1D38E,0x78CE32F8,0x04AF1723,0x99A0F655,0xFC6D5C95,0x6162BDE3,0x1D039838,0x800C794E,
0xD698CDB8,0x4B972CCE,0x37F60915,0xAAF9E863,0xCF3442A3,0x523BA3D5,0x2E5A860E,0xB3556778,
0x4E17973A,0xD318764C,0xAF795397,0x3276B2E1,0x57BB1821,0xCAB4F957,0xB6D5DC8C,0x2BDA3DFA,
0x7D4E890C,0xE041687A,0x9C204DA1,0x012FACD7,0x64E20617,0xF9EDE761,0x858CC2BA,0x188323CC,
0x28A5AB56,0xB5AA4A20,0xC9CB6FFB,0x54C48E8D,0x3109244D,0xAC06C53B,0xD067E0E0,0x4D680196,
0x1BFCB560,0x86F35416,0xFA9271CD,0x679D90BB,0x02503A7B,0x9F5FDB0D,0xE33EFED6,0x7E311FA0,
0xC2CA1813,0x5FC5F965,0x23A4DCBE,0xBEAB3DC8,0xDB669708,0x4669767E,0x3A0853A5,0xA707B2D3,
0xF1930625,0x6C9CE753,0x10FDC288,0x8DF223FE,0xE83F893E,0x75306848,0x09514D93,0x945EACE5,
0xA478247F,0x3977C509,0x4516E0D2,0xD81901A4,0xBDD4AB64,0x20DB4A12,0x5CBA6FC9,0xC1B58EBF,
0x97213A49,0x0A2EDB3F,0x764FFEE4,0xEB401F92,0x8E8DB552,0x13825424,0x6FE371FF,0xF2EC9089,
0x0FAE60CB,0x92A181BD,0xEEC0A466,0x73CF4510,0x1602EFD0,0x8B0D0EA6,0xF76C2B7D,0x6A63CA0B,
0x3CF77EFD,0xA1F89F8B,0xDD99BA50,0x40965B26,0x255BF1E6,0xB8541090,0xC435354B,0x593AD43D,
0x691C5CA7,0xF413BDD1,0x8872980A,0x157D797C,0x70B0D3BC,0xEDBF32CA,0x91DE1711,0x0CD1F667,
0x5A454291,0xC74AA3E7,0xBB2B863C,0x2624674A,0x43E9CD8A,0xDEE62CFC,0xA2870927,0x3F88E851,
},
{
0x00000000,0xB9FBDBE8,0xA886B191,0x117D6A79,0x8A7C6563,0x3387BE8B,0x22FAD4F2,0x9B010F1A,
0xCF89CC87,0x7672176F,0x670F7D16,0xDEF4A6FE,0x45F5A9E4,0xFC0E720C,0xED731875,0x5488C39D,
0x44629F4F,0xFD9944A7,0xECE42EDE,0x551FF536,0xCE1EFA2C,0x77E521C4,0x66984BBD,0xDF639055,
0x8BEB53C8,0x32108820,0x236DE259,0x9A9639B1,0x019736AB,0xB86CED43,0xA911873A,0x10EA5CD2,
0x88C53E9E,0x313EE576,0x20438F0F,0x99B854E7,0x02B95BFD,0xBB428015,0xAA3FEA6C,0x13C43184,
0x474CF219,0xFEB729F1,0xEFCA4388,0x56319860,0xCD30977A,0x74CB4C92,0x65B626EB,0xDC4DFD03,
0xCCA7A1D1,0x755C7A39,0x64211040,0xDDDACBA8,0x46DBC4B2,0xFF201F5A,0xEE5D7523,0x57A6AECB,
0x032E6D56,0xBAD5B6BE,0xABA8DCC7,0x1253072F,0x89520835,0x30A9D3DD,0x21D4B9A4,0x982F624C,
0xCAFB7B7D,0x7300A095,0x627DCAEC,0xDB861104,0x40871E1E,0xF97CC5F6,0xE801AF8F,0x51FA7467,
0x0572B7FA,0xBC896C12,0xADF4066B,0x140FDD83,0x8F0ED299,0x36F50971,0x27886308,0x9E73B8E0,
0x8E99E432,0x37623FDA,0x261F55A3,0x9FE48E4B,0x04E58151,0xBD1E5AB9,0xAC6330C0,0x1598EB28,
0x411028B5,0xF8EBF35D,0xE9969924,0x506D42CC,0xCB6C4DD6,0x7297963E,0x63EAFC47,0xDA1127AF,
0x423E45E3,0xFBC59E0B,0xEAB8F472,0x53432F9A,0xC8422080,0x71B9FB68,0x60C49111,0xD93F4AF9,
0x8DB78964,0x344C528C,0x253138F5,0x9CCAE31D,0x07CBEC07,0xBE3037EF,0xAF4D5D96,0x16B6867E,
0x065CDAAC,0xBFA70144,0xAEDA6B3D,0x1721B0D5,0x8C20BFCF,0x35DB6427,0x24A60E5E,0x9D5DD5B6,
0xC9D5162B,0x702ECDC3,0x6153A7BA,0xD8A87C52,0x43A97348,0xFA52A8A0,0xEB2FC2D9,0x52D41931,
0x4E87F0BB,0xF77C2B53,0xE601412A,0x5FFA9AC2,0xC4FB95D8,0x7D004E30,0x6C7D2449,0xD586FFA1,
0x810E3C3C,0x38F5E7D4,0x29888DAD,0x90735645,0x0B72595F,0xB28982B7,0xA3F4E8CE,0x1A0F3326,
0x0AE56FF4,0xB31EB41C,0xA263DE65,0x1B98058D,0x80990A97,0x3962D17F,0x281FBB06,0x91E460EE,
0xC56CA373,0x7C97789B,0x6DEA12E2,0xD411C90A,0x4F10C610,0xF6EB1DF8,0xE7967781,0x5E6DAC69,
0xC642CE25,0x7FB915CD,0x6EC47FB4,0xD73FA45C,0x4C3EAB46,0xF5C570AE,0xE4B81AD7,0x5D43C13F,
0x09CB02A2,0xB030D94A,0xA14DB333,0x18B668DB,0x83B767C1,0x3A4CBC29,0x2B31D650,0x92CA0DB8,
0x8220516A,0x3BDB8A82,0x2AA6E0FB,0x935D3B13,0x085C3409,0xB1A7EFE1,0xA0DA8598,0x19215E70,
0x4DA99DED,0xF4524605,0xE52F2C7C,0x5CD4F794,0xC7D5F88E,0x7E2E2366,0x6F53491F,0xD6A892F7,
0x847C8BC6,0x3D87502E,0x2CFA3A57,0x9501E1BF,0x0E00EEA5,0xB7FB354D,0xA6865F34,0x1F7D84DC,
0x4BF54741,0xF20E9CA9,0xE373F6D0,0x5A882D38,0xC1892222,0x7872F9CA,0x690F93B3,0xD0F4485B,
0xC01E1489,0x79E5CF61,0x6898A518,0xD1637EF0,0x4A6271EA,0xF399AA02,0xE2E4C07B,0x5B1F1B93,
0x0F97D80E,0xB66C03E6,0xA711699F,0x1EEAB277,0x85EBBD6D,0x3C106685,0x2D6D0CFC,0x9496D714,
0x0CB9B558,0xB5426EB0,0xA43F04C9,0x1DC4DF21,0x86C5D03B,0x3F3E0BD3,0x2E4361AA,0x97B8BA42,
0xC33079DF,0x7ACBA237,0x6BB6C84E,0xD24D13A6,0x494C1CBC,0xF0B7C754,0xE1CAAD2D,0x583176C5,
0x48DB2A17,0xF120F1FF,0xE05D9B86,0x59A6406E,0xC2A74F74,0x7B5C949C,0x6A21FEE5,0xD3DA250D,
0x8752E690,0x3EA93D78,0x2FD45701,0x962F8CE9,0x0D2E83F3,0xB4D5581B,0xA5A83262,0x1C53E98A,
},
{
0x00000000,0xAE689191,0x87A02563,0x29C8B4F2,0xD4314C87,0x7A59DD16,0x539169E4,0xFDF9F875,
0x73139F4F,0xDD7B0EDE,0xF4B3BA2C,0x5ADB2BBD,0xA722D3C8,0x094A4259,0x2082F6AB,0x8EEA673A,
0xE6273E9E,0x484FAF0F,0x61871BFD,0xCFEF8A6C,0x32167219,0x9C7EE388,0xB5B6577A,0x1BDEC6EB,
0x9534A1D1,0x3B5C3040,0x129484B2,0xBCFC1523,0x4105ED56,0xEF6D7CC7,0xC6A5C835,0x68CD59A4,
0x173F7B7D,0xB957EAEC,0x909F5E1E,0x3EF7CF8F,0xC30E37FA,0x6D66A66B,0x44AE1299,0xEAC68308,
0x642CE432,0xCA4475A3,0xE38CC151,0x4DE450C0,0xB01DA8B5,0x1E753924,0x37BD8DD6,0x99D51C47,
0xF11845E3,0x5F70D472,0x76B86080,0xD8D0F111,0x25290964,0x8B4198F5,0xA2892C07,0x0CE1BD96,
0x820BDAAC,0x2C634B3D,0x05ABFFCF,0xABC36E5E,0x563A962B,0xF85207BA,0xD19AB348,0x7FF222D9,
0x2E7EF6FA,0x8016676B,0xA9DED399,0x07B64208,0xFA4FBA7D,0x54272BEC,0x7DEF9F1E,0xD3870E8F,
0x5D6D69B5,0xF305F824,0xDACD4CD6,0x74A5DD47,0x895C2532,0x2734B4A3,0x0EFC0051,0xA09491C0,
0xC859C864,0x663159F5,0x4FF9ED07,0xE1917C96,0x1C6884E3,0xB2001572,0x9BC8A180,0x35A03011,
0xBB4A572B,0x1522C6BA,0x3CEA7248,0x9282E3D9,0x6F7B1BAC,0xC1138A3D,0xE8DB3ECF,0x46B3AF5E,
0x39418D87,0x97291C16,0xBEE1A8E4,0x10893975,0xED70C100,0x43185091,0x6AD0E463,0xC4B875F2,
0x4A5212C8,0xE43A8359,0xCDF237AB,0x639AA63A,0x9E635E4F,0x300BCFDE,0x19C37B2C,0xB7ABEABD,
0xDF66B319,0x710E2288,0x58C6967A,0xF6AE07EB,0x0B57FF9E,0xA53F6E0F,0x8CF7DAFD,0x229F4B6C,
0xAC752C56,0x021DBDC7,0x2BD50935,0x85BD98A4,0x784460D1,0xD62CF140,0xFFE445B2,0x518CD423,
0x5CFDEDF4,0xF2957C65,0xDB5DC897,0x75355906,0x88CCA173,0x26A430E2,0x0F6C8410,0xA1041581,
0x2FEE72BB,0x8186E32A,0xA84E57D8,0x0626C649,0xFBDF3E3C,0x55B7AFAD,0x7C7F1B5F,0xD2178ACE,
0xBADAD36A,0x14B242FB,0x3D7AF609,0x93126798,0x6EEB9FED,0xC0830E7C,0xE94BBA8E,0x47232B1F,
0xC9C94C25,0x67A1DDB4,0x4E696946,0xE001F8D7,0x1DF800A2,0xB3909133,0x9A5825C1,0x3430B450,
0x4BC29689,0xE5AA0718,0xCC62B3EA,0x620A227B,0x9FF3DA0E,0x319B4B9F,0x1853FF6D,0xB63B6EFC,
0x38D109C6,0x96B99857,0xBF712CA5,0x1119BD34,0xECE04541,0x4288D4D0,0x6B406022,0xC528F1B3,
0xADE5A817,0x038D3986,0x2A458D74,0x842D1CE5,0x79D4E490,0xD7BC7501,0xFE74C1F3,0x501C5062,
0xDEF63758,0x709EA6C9,0x5956123B,0xF73E83AA,0x0AC77BDF,0xA4AFEA4E,0x8D675EBC,0x230FCF2D,
0x72831B0E,0xDCEB8A9F,0xF5233E6D,0x5B4BAFFC,0xA6B25789,0x08DAC618,0x211272EA,0x8F7AE37B,
0x01908441,0xAFF815D0,0x8630A122,0x285830B3,0xD5A1C8C6,0x7BC95957,0x5201EDA5,0xFC697C34,
0x94A42590,0x3ACCB401,0x130400F3,0xBD6C9162,0x40956917,0xEEFDF886,0xC7354C74,0x695DDDE5,
0xE7B7BADF,0x49DF2B4E,0x60179FBC,0xCE7F0E2D,0x3386F658,0x9DEE67C9,0xB426D33B,0x1A4E42AA,
0x65BC6073,0xCBD4F1E2,0xE21C4510,0x4C74D481,0xB18D2CF4,0x1FE5BD65,0x362D0997,0x98459806,
0x16AFFF3C,0xB8C76EAD,0x910FDA5F,0x3F674BCE,0xC29EB3BB,0x6CF6222A,0x453E96D8,0xEB560749,
0x839B5EED,0x2DF3CF7C,0x043B7B8E,0xAA53EA1F,0x57AA126A,0xF9C283FB,0xD00A3709,0x7E62A698,
0xF088C1A2,0x5EE05033,0x7728E4C1,0xD9407550,0x24B98D25,0x8AD11CB4,0xA319A846,0x0D7139D7,
}
#endif // CRC32_USE_LOOKUP_TABLE_SLICING_BY_16
};
#endif
```
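The tables above are the upper slices of a slicing-by-4/8/16 CRC-32 lookup table, gated by the `CRC32_USE_LOOKUP_TABLE_SLICING_BY_*` macros. For reference, a minimal sketch of the slicing-by-4 inner loop that consumes such a table is shown below; the table name `Crc32Lookup` and the little-endian byte order are assumptions for illustration, not necessarily what this header uses.

```cpp
#include <cstddef>
#include <cstdint>

// Assumed layout, as in typical slicing-by-N implementations: slice 0 is the
// classic byte-at-a-time table, and each further slice satisfies
//   slice[k+1][b] == (slice[k][b] >> 8) ^ slice[0][slice[k][b] & 0xFF]
extern const uint32_t Crc32Lookup[][256];

uint32_t crc32_slice_by_4(const void* data, size_t length, uint32_t previousCrc32) {
  uint32_t crc = ~previousCrc32;  // CRC-32 operates on the inverted state
  const uint32_t* current = static_cast<const uint32_t*>(data);
  // Fold four input bytes per iteration (little-endian host assumed).
  while (length >= 4) {
    uint32_t one = *current++ ^ crc;
    crc = Crc32Lookup[0][(one >> 24) & 0xFF] ^
          Crc32Lookup[1][(one >> 16) & 0xFF] ^
          Crc32Lookup[2][(one >> 8) & 0xFF] ^
          Crc32Lookup[3][one & 0xFF];
    length -= 4;
  }
  // Finish the 0..3 trailing bytes with the plain one-byte table.
  const uint8_t* currentChar = reinterpret_cast<const uint8_t*>(current);
  while (length-- != 0) {
    crc = (crc >> 8) ^ Crc32Lookup[0][(crc & 0xFF) ^ *currentChar++];
  }
  return ~crc;
}
```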
|
=====================================================================================================================================
SOURCE CODE FILE: file_adapter.h
LINES: 1
SIZE: 0.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\caffe2\serialize\file_adapter.h
ENCODING: utf-8
```h
#pragma once
#include <c10/macros/Macros.h>
#include <fstream>
#include <memory>
#include "caffe2/serialize/istream_adapter.h"
#include "caffe2/serialize/read_adapter_interface.h"
namespace caffe2 {
namespace serialize {
class TORCH_API FileAdapter final : public ReadAdapterInterface {
public:
C10_DISABLE_COPY_AND_ASSIGN(FileAdapter);
explicit FileAdapter(const std::string& file_name);
size_t size() const override;
size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
const override;
~FileAdapter() override;
private:
// An RAII Wrapper for a FILE pointer. Closes on destruction.
struct RAIIFile {
FILE* fp_;
explicit RAIIFile(const std::string& file_name);
~RAIIFile();
};
RAIIFile file_;
// The size of the opened file in bytes
uint64_t size_;
};
} // namespace serialize
} // namespace caffe2
```
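For reference, a minimal usage sketch of FileAdapter; the caller-supplied path is hypothetical and error handling is omitted.

```cpp
#include <algorithm>
#include <string>
#include <vector>
#include "caffe2/serialize/file_adapter.h"

// Read the first `n` bytes of a file through FileAdapter. The adapter owns
// the FILE* via RAIIFile, so the handle is closed when it goes out of scope.
std::vector<char> readPrefix(const std::string& file_name, size_t n) {
  caffe2::serialize::FileAdapter adapter(file_name);
  std::vector<char> buf(std::min<size_t>(n, adapter.size()));
  // `what` only decorates error messages, so any short label works.
  adapter.read(/*pos=*/0, buf.data(), buf.size(), "prefix");
  return buf;
}
```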
|
==========================================================================================================================================
SOURCE CODE FILE: in_memory_adapter.h
LINES: 1
SIZE: 0.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\caffe2\serialize\in_memory_adapter.h
ENCODING: utf-8
```h
#pragma once
#include <caffe2/serialize/read_adapter_interface.h>
#include <cstring>
namespace caffe2 {
namespace serialize {
class MemoryReadAdapter final : public caffe2::serialize::ReadAdapterInterface {
public:
explicit MemoryReadAdapter(const void* data, off_t size)
: data_(data), size_(size) {}
size_t size() const override {
return size_;
}
size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
const override {
(void)what;
memcpy(buf, (int8_t*)(data_) + pos, n);
return n;
}
private:
const void* data_;
off_t size_;
};
} // namespace serialize
} // namespace caffe2
```
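Because this header is fully inline, its contract is visible directly: read() is an unchecked memcpy from data_ + pos, so callers must keep pos + n within bounds and keep the buffer alive for the adapter's lifetime. A minimal sketch:

```cpp
#include <cstdint>
#include "caffe2/serialize/in_memory_adapter.h"

int main() {
  uint8_t blob[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
  // The adapter holds a raw pointer; `blob` must outlive it.
  caffe2::serialize::MemoryReadAdapter adapter(blob, sizeof(blob));
  uint8_t out[4];
  adapter.read(/*pos=*/8, out, sizeof(out));  // copies bytes 8..11
  return out[0] == 8 ? 0 : 1;
}
```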
|
=========================================================================================================================================
SOURCE CODE FILE: inline_container.h
LINES: 1
SIZE: 10.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\caffe2\serialize\inline_container.h
ENCODING: utf-8
```h
#pragma once
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <istream>
#include <mutex>
#include <ostream>
#include <unordered_set>
#include <c10/core/Allocator.h>
#include <c10/core/Backend.h>
#include "caffe2/serialize/istream_adapter.h"
#include "caffe2/serialize/read_adapter_interface.h"
#include "caffe2/serialize/versions.h"
extern "C" {
typedef struct mz_zip_archive mz_zip_archive;
}
// A PyTorch container is a special zip archive with the following layout
// archive_name.zip contains:
// archive_name/
// version # a file with a single decimal number written in ascii,
// # used to establish the version of the archive format
// model.json # overall model description, this is a json output of
// # ModelDef from torch.proto
// # the following names are by convention only, model.json will
// # refer to these files by full names
// tensors/
// 0 # flat storage for tensor data, meta-data about shapes, etc. is
// # in model.json
// 1
// ...
// # code entries will only exist for modules that have methods attached
// code/
// archive_name.py # serialized torch script code (python syntax, using
// PythonPrint) archive_name_my_submodule.py # submodules have separate
// files
//
// The PyTorchStreamWriter also ensures additional useful properties for these
// files
// 1. All files are stored uncompressed.
// 2. All files in the archive are aligned to 64 byte boundaries such that
// it is possible to mmap the entire file and get an aligned pointer to
// tensor data.
// 3. We universally write in ZIP64 format for consistency.
// The PyTorchStreamReader also provides additional properties:
// 1. It can read zip files that are created with common
// zip tools. This means that even though our writer doesn't compress files,
// the reader can still read files that were compressed.
// 2. It provides a getRecordOffset function which returns the offset into the
// raw file where file data lives. If the file was written with
// PyTorchStreamWriter it is guaranteed to be 64 byte aligned.
// PyTorchReader/Writer handle checking the version number on the archive format
// and ensure that all files are written to an archive_name directory so they
// unzip cleanly.
// When developing this format we want to pay particular attention to the
// following use cases:
//
// -- Reading --
// 1) Reading with full random access
// a) Reading with file api's such as fread()
// b) mmaping the file and jumping around the mapped region
// 2) Reading with 1-pass sequential access
// -> A reader will need to build up a data structure of parsed structures
// as it reads
//
// -- Writing --
// 1) Writing with full random access
// 2) Writing with 1-pass sequential access
// -> We must take care not to require updating values that have already
// been written. We place the variable-length index at the end and do
// not put any indices into the header to fulfill this constraint.
// The model.json, which contains all the metadata information,
// should be written as the last file. One reason is that the size of tensor
// data is usually stable. As long as the shape and type of the tensor do not
// change, the size of the data won't change. On the other hand, the size of the
// serialized model is likely to change, so we store it as the last record, and
// we don't need to move previous records when updating the model data.
// The zip format is sufficiently flexible to handle the above use-case.
// it puts its central directory at the end of the archive and we write
// model.json as the last file when writing after we have accumulated all
// other information.
namespace caffe2 {
namespace serialize {
static constexpr const char* kSerializationIdRecordName =
".data/serialization_id";
struct MzZipReaderIterWrapper;
class TORCH_API ChunkRecordIterator {
public:
~ChunkRecordIterator();
// Read at most `chunkSize` bytes into `buf`. Return the number of bytes
// actually read.
size_t next(void* buf);
size_t recordSize() const {
return recordSize_;
}
private:
ChunkRecordIterator(
size_t recordSize,
size_t chunkSize,
std::unique_ptr<MzZipReaderIterWrapper> iter);
const size_t recordSize_;
const size_t chunkSize_;
size_t offset_;
std::unique_ptr<MzZipReaderIterWrapper> iter_;
friend class PyTorchStreamReader;
};
class TORCH_API PyTorchStreamReader final {
public:
explicit PyTorchStreamReader(const std::string& file_name);
explicit PyTorchStreamReader(std::istream* in);
explicit PyTorchStreamReader(std::shared_ptr<ReadAdapterInterface> in);
// return dataptr, size
std::tuple<at::DataPtr, size_t> getRecord(const std::string& name);
// multi-thread getRecord
std::tuple<at::DataPtr, size_t> getRecord(
const std::string& name,
std::vector<std::shared_ptr<ReadAdapterInterface>>& additionalReaders);
// inplace memory writing
size_t getRecord(const std::string& name, void* dst, size_t n);
// inplace memory writing, multi-threaded.
// When additionalReaders is empty, the default behavior is to call
// getRecord(name, dst, n) with the default reader. This approach can be
// used for reading large tensors.
size_t getRecord(
const std::string& name,
void* dst,
size_t n,
std::vector<std::shared_ptr<ReadAdapterInterface>>& additionalReaders);
size_t getRecord(
const std::string& name,
void* dst,
size_t n,
size_t chunk_size,
void* buf,
const std::function<void(void*, const void*, size_t)>& memcpy_func =
nullptr);
// Concurrently read a record with multiple readers.
// additionalReaders are additional clients that access the underlying record
// at different offsets and write to different chunks of the buffer. For
// example, if the overall size of the tensor is 10 and there are 2 additional
// readers, the default reader reads [0,4), the first additional reader reads
// [4,8), and the second additional reader reads [8,10); each one writes to
// the corresponding region of the buffer. When additionalReaders is empty,
// the default behavior is to call getRecord(name) with the default reader.
// This approach can be used for reading large tensors.
size_t getRecordMultiReaders(
const std::string& name,
std::vector<std::shared_ptr<ReadAdapterInterface>>& additionalReaders,
void* dst,
size_t n);
size_t getRecordSize(const std::string& name);
size_t getRecordHeaderOffset(const std::string& name);
size_t getRecordOffset(const std::string& name);
size_t getRecordOffsetNoRead(
size_t cursor,
std::string filename,
size_t size,
uint64_t alignment);
bool hasRecord(const std::string& name);
std::vector<std::string> getAllRecords();
ChunkRecordIterator createChunkReaderIter(
const std::string& name,
const size_t recordSize,
const size_t chunkSize);
~PyTorchStreamReader();
uint64_t version() const {
return version_;
}
const std::string& serializationId() {
return serialization_id_;
}
void setShouldLoadDebugSymbol(bool should_load_debug_symbol) {
load_debug_symbol_ = should_load_debug_symbol;
}
void setAdditionalReaderSizeThreshold(const size_t& size) {
additional_reader_size_threshold_ = size;
}
private:
void init();
size_t read(uint64_t pos, char* buf, size_t n);
void valid(const char* what, const char* info = "");
size_t getRecordID(const std::string& name);
friend size_t
istream_read_func(void* pOpaque, uint64_t file_ofs, void* pBuf, size_t n);
std::unique_ptr<mz_zip_archive> ar_;
std::string archive_name_;
std::string archive_name_plus_slash_;
std::shared_ptr<ReadAdapterInterface> in_;
int64_t version_;
std::mutex reader_lock_;
bool load_debug_symbol_ = true;
std::string serialization_id_;
size_t additional_reader_size_threshold_;
};
class TORCH_API PyTorchStreamWriter final {
public:
explicit PyTorchStreamWriter(
const std::string& archive_name,
bool compute_crc32 = true,
uint64_t alignment = 64);
explicit PyTorchStreamWriter(
const std::function<size_t(const void*, size_t)> writer_func,
bool compute_crc32 = true,
uint64_t alignment = 64);
void setMinVersion(const uint64_t version);
void writeRecord(
const std::string& name,
const void* data,
size_t size,
bool compress = false);
void writeEndOfFile();
const std::unordered_set<std::string>& getAllWrittenRecords();
bool finalized() const {
return finalized_;
}
const std::string& archiveName() {
return archive_name_;
}
const std::string& serializationId() {
return serialization_id_;
}
~PyTorchStreamWriter();
private:
void setup(const std::string& file_name);
void valid(const char* what, const char* info = "");
void writeSerializationId();
size_t current_pos_ = 0;
std::unordered_set<std::string> files_written_;
std::unique_ptr<mz_zip_archive> ar_;
std::string archive_name_;
std::string archive_name_plus_slash_;
std::string padding_;
std::ofstream file_stream_;
std::function<size_t(const void*, size_t)> writer_func_;
uint64_t combined_uncomp_crc32_ = 0;
std::string serialization_id_;
bool compute_crc32_;
uint64_t alignment_;
// This number will be updated when the model has operators
// that have valid upgraders.
uint64_t version_ = kMinProducedFileFormatVersion;
bool finalized_ = false;
bool err_seen_ = false;
friend size_t ostream_write_func(
void* pOpaque,
uint64_t file_ofs,
const void* pBuf,
size_t n);
};
namespace detail {
// Returns a record to be appended to the local user extra data entry in order
// to make the beginning of the data aligned to a kFieldAlignment-byte boundary.
size_t getPadding(
size_t cursor,
size_t filename_size,
size_t size,
std::string& padding_buf,
uint64_t alignment);
std::tuple<size_t, size_t>
getOffset(size_t cursor, size_t filename_size, size_t size, uint64_t alignment);
} // namespace detail
} // namespace serialize
} // namespace caffe2
```
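A round-trip sketch of the writer/reader pair declared above. The archive path and record name are illustrative only; `tensors/0` follows the layout convention documented at the top of the header.

```cpp
#include "caffe2/serialize/inline_container.h"

using caffe2::serialize::PyTorchStreamReader;
using caffe2::serialize::PyTorchStreamWriter;

// Write one record into an archive, then read it back.
// "archive.zip" and "tensors/0" are hypothetical names.
void roundTrip() {
  const char payload[] = "raw tensor bytes";

  PyTorchStreamWriter writer("archive.zip");
  writer.writeRecord("tensors/0", payload, sizeof(payload));
  writer.writeEndOfFile();  // finalizes the archive (central directory last)

  PyTorchStreamReader reader("archive.zip");
  if (reader.hasRecord("tensors/0")) {
    auto [data, size] = reader.getRecord("tensors/0");
    // data.get() points at `size` bytes; per the comments above, records
    // written by PyTorchStreamWriter are aligned (64 bytes by default).
    (void)data;
    (void)size;
  }
}
```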
|
========================================================================================================================================
SOURCE CODE FILE: istream_adapter.h
LINES: 1
SIZE: 0.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\caffe2\serialize\istream_adapter.h
ENCODING: utf-8
```h
#pragma once
#include <istream>
#include "c10/macros/Macros.h"
#include "caffe2/serialize/read_adapter_interface.h"
namespace caffe2 {
namespace serialize {
// This is a reader implemented on top of std::istream.
class TORCH_API IStreamAdapter final : public ReadAdapterInterface {
public:
C10_DISABLE_COPY_AND_ASSIGN(IStreamAdapter);
explicit IStreamAdapter(std::istream* istream);
size_t size() const override;
size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
const override;
~IStreamAdapter() override;
private:
std::istream* istream_;
void validate(const char* what) const;
};
} // namespace serialize
} // namespace caffe2
```
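A minimal sketch of the adapter; it does not take ownership, so the istream must outlive it. The payload bytes are illustrative (the 4-byte ZIP local-file-header signature):

```cpp
#include <sstream>
#include <string>
#include "caffe2/serialize/istream_adapter.h"

int main() {
  std::istringstream stream(std::string("PK\x03\x04", 4));
  caffe2::serialize::IStreamAdapter adapter(&stream);  // non-owning
  char magic[4];
  adapter.read(/*pos=*/0, magic, sizeof(magic), "magic");
  return magic[0] == 'P' ? 0 : 1;
}
```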
|
===============================================================================================================================================
SOURCE CODE FILE: read_adapter_interface.h
LINES: 1
SIZE: 0.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\caffe2\serialize\read_adapter_interface.h
ENCODING: utf-8
```h
#pragma once
#include <cstddef>
#include <cstdint>
#include "c10/macros/Macros.h"
namespace caffe2 {
namespace serialize {
// This is the interface for the (file/stream/memory) reader in
// PyTorchStreamReader. With this interface, we can extend the support
// beyond the standard istream.
class TORCH_API ReadAdapterInterface {
public:
virtual size_t size() const = 0;
virtual size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
const = 0;
virtual ~ReadAdapterInterface();
};
} // namespace serialize
} // namespace caffe2
```
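As the comment notes, this interface exists so readers beyond files and istreams can be plugged in. A minimal sketch of a custom adapter over an owned byte vector; VectorReadAdapter is a hypothetical name, not part of the library:

```cpp
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>
#include "caffe2/serialize/read_adapter_interface.h"

// Hypothetical adapter that owns its bytes, unlike MemoryReadAdapter.
class VectorReadAdapter final : public caffe2::serialize::ReadAdapterInterface {
 public:
  explicit VectorReadAdapter(std::vector<char> data) : data_(std::move(data)) {}

  size_t size() const override {
    return data_.size();
  }

  size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
      const override {
    (void)what;
    // Clamp instead of reading past the end.
    size_t avail = pos < data_.size() ? data_.size() - pos : 0;
    size_t to_copy = n < avail ? n : avail;
    std::memcpy(buf, data_.data() + pos, to_copy);
    return to_copy;
  }

 private:
  std::vector<char> data_;
};
```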
|
=================================================================================================================================
SOURCE CODE FILE: versions.h
LINES: 1
SIZE: 6.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\caffe2\serialize\versions.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
namespace caffe2 {
namespace serialize {
constexpr uint64_t kMinSupportedFileFormatVersion = 0x1L;
constexpr uint64_t kMaxSupportedFileFormatVersion = 0xAL;
// Versions (i.e. why was the version number bumped?)
// Note [Dynamic Versions and torch.jit.save vs. torch.save]
//
// Our versioning scheme has a "produced file format version" which
// describes how an archive is to be read. The version written in an archive
// is at least this current produced file format version, but may be greater
// if it includes certain symbols. We refer to these conditional versions
// as "dynamic," since they are identified at runtime.
//
// Dynamic versioning is useful when an operator's semantics are updated.
// When using torch.jit.save we want those semantics to be preserved. If
// we bumped the produced file format version on every change, however,
// then older versions of PyTorch couldn't read even simple archives, like
// a single tensor, from newer versions of PyTorch. Instead, we
// assign dynamic versions to these changes that override the
// produced file format version as needed. That is, when the semantics
// of torch.div changed it was assigned dynamic version 4, and when
// torch.jit.saving modules that use torch.div those archives also have
// (at least) version 4. This prevents earlier versions of PyTorch
// from accidentally performing the wrong kind of division. Modules
// that don't use torch.div or other operators with dynamic versions
// can write the produced file format version, and these programs will
// run as expected on earlier versions of PyTorch.
//
// While torch.jit.save attempts to preserve operator semantics,
// torch.save does not. torch.save is analogous to pickling Python, so
// a function that uses torch.div will have different behavior if torch.saved
// and torch.loaded across PyTorch versions. From a technical perspective,
// torch.save ignores dynamic versioning.
// 1. Initial version
// 2. Removed op_version_set version numbers
// 3. Added type tags to pickle serialization of container types
// 4. (Dynamic) Stopped integer division using torch.div
// (a versioned symbol preserves the historic behavior of versions 1--3)
// 5. (Dynamic) Stops torch.full inferring a floating point dtype
// when given bool or integer fill values.
// 6. Write version string to `./data/version` instead of `version`.
// [12/15/2021]
// kProducedFileFormatVersion was bumped from 3 to 7 due to a different
// interpretation of what the file format version means.
// Whenever a new upgrader is introduced, this number should be bumped.
// The reasons that version is bumped in the past:
// 1. aten::div is changed at version 4
// 2. aten::full is changed at version 5
// 3. torch.package uses version 6
// 4. Introduce new upgrader design and set the version number to 7
// mark this change
// --------------------------------------------------
// We describe new operator version bump reasons here:
// 1) [01/24/2022]
// We bump the version number to 8 to update aten::linspace
// and aten::linspace.out to error out when steps is not
// provided. (see: https://github.com/pytorch/pytorch/issues/55951)
// 2) [01/30/2022]
// Bump the version number to 9 to update aten::logspace
// and aten::logspace.out to error out when steps is not
// provided. (see: https://github.com/pytorch/pytorch/issues/55951)
// 3) [02/11/2022]
// Bump the version number to 10 to update aten::gelu
// and aten::gelu.out to support the new approximate kwarg.
// (see: https://github.com/pytorch/pytorch/pull/61439)
constexpr uint64_t kProducedFileFormatVersion = 0xAL;
// The absolute minimum version at which we will write packages. This
// means that every package from now on will always be
// greater than this number.
constexpr uint64_t kMinProducedFileFormatVersion = 0x3L;
// The version we write when the archive contains bytecode.
// It must be greater than or equal to kProducedFileFormatVersion,
// because TorchScript changes are likely to introduce bytecode changes.
// If kProducedFileFormatVersion is increased, kProducedBytecodeVersion
// should be increased too. The relationship is:
// kMaxSupportedFileFormatVersion >= (most likely ==) kProducedBytecodeVersion
// >= kProducedFileFormatVersion
// If a format change is forward compatible (still readable by older
// executables), we will not increment the version number, to minimize the
// risk of breaking existing clients. TODO: A better way would be to allow
// the caller that creates a model to specify a maximum version that its
// clients can accept.
// Versions:
// 0x1L: Initial version
// 0x2L: (Comment missing)
// 0x3L: (Comment missing)
// 0x4L: (update) Added schema to function tuple. Forward-compatible change.
// 0x5L: (update) Update bytecode to share constant tensor files with
// torchscript, and only serialize extra tensors that are not in the
// torchscript constant table. Also update the tensor storage schema to adapt
// to the unified format: the root key of tensor storage is updated from
// {index} to {the_pointer_value_the_tensor.storage}, for example
// `140245072983168.storage`. Forward-compatible change.
// 0x6L: Implicit operator versioning using the number of specified arguments.
// Refer to the summary of https://github.com/pytorch/pytorch/pull/56845 for
// details.
// 0x7L: Enable support for operators with default arguments plus out
// arguments. See https://github.com/pytorch/pytorch/pull/63651 for
// details.
// 0x8L: Emit promoted operators as instructions. See
// https://github.com/pytorch/pytorch/pull/71662 for details.
// 0x9L: Change serialization format from pickle to flatbuffer. This version
// is to serve the migration; v8 pickle and v9 flatbuffer are the same. Refer to the
// summary of https://github.com/pytorch/pytorch/pull/75201 for more details.
constexpr uint64_t kProducedBytecodeVersion = 0x8L;
// static_assert(
// kProducedBytecodeVersion >= kProducedFileFormatVersion,
// "kProducedBytecodeVersion must be higher or equal to
// kProducedFileFormatVersion.");
// Introduce kMinSupportedBytecodeVersion and kMaxSupportedBytecodeVersion
// for limited backward/forward compatibility support of bytecode. If
// kMinSupportedBytecodeVersion <= model_version <= kMaxSupportedBytecodeVersion
// (in loader), we should support this model_version. For example, we provide a
// wrapper to handle an updated operator.
constexpr uint64_t kMinSupportedBytecodeVersion = 0x4L;
constexpr uint64_t kMaxSupportedBytecodeVersion = 0x9L;
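// Illustrative sketch (not part of this header): a loader could gate on the
// supported range like this; `model_version` stands for the hypothetical
// bytecode version read from the archive being loaded.
//
//   bool supported = model_version >= kMinSupportedBytecodeVersion &&
//                    model_version <= kMaxSupportedBytecodeVersion;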
} // namespace serialize
} // namespace caffe2
```
|
===============================================================================================================
SOURCE CODE FILE: cpuinfo.h
LINES: 1
SIZE: 55.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\cpuinfo.h
ENCODING: utf-8
```h
#pragma once
#ifndef CPUINFO_H
#define CPUINFO_H
#ifndef __cplusplus
#include <stdbool.h>
#endif
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
#include <stdint.h>
/* Identify architecture and define corresponding macro */
#if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86)
#define CPUINFO_ARCH_X86 1
#endif
#if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
#define CPUINFO_ARCH_X86_64 1
#endif
#if defined(__arm__) || defined(_M_ARM)
#define CPUINFO_ARCH_ARM 1
#endif
#if defined(__aarch64__) || defined(_M_ARM64)
#define CPUINFO_ARCH_ARM64 1
#endif
#if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64)
#define CPUINFO_ARCH_PPC64 1
#endif
#if defined(__asmjs__)
#define CPUINFO_ARCH_ASMJS 1
#endif
#if defined(__wasm__)
#if defined(__wasm_simd128__)
#define CPUINFO_ARCH_WASMSIMD 1
#else
#define CPUINFO_ARCH_WASM 1
#endif
#endif
#if defined(__riscv)
#if (__riscv_xlen == 32)
#define CPUINFO_ARCH_RISCV32 1
#elif (__riscv_xlen == 64)
#define CPUINFO_ARCH_RISCV64 1
#endif
#endif
/* Define other architecture-specific macros as 0 */
#ifndef CPUINFO_ARCH_X86
#define CPUINFO_ARCH_X86 0
#endif
#ifndef CPUINFO_ARCH_X86_64
#define CPUINFO_ARCH_X86_64 0
#endif
#ifndef CPUINFO_ARCH_ARM
#define CPUINFO_ARCH_ARM 0
#endif
#ifndef CPUINFO_ARCH_ARM64
#define CPUINFO_ARCH_ARM64 0
#endif
#ifndef CPUINFO_ARCH_PPC64
#define CPUINFO_ARCH_PPC64 0
#endif
#ifndef CPUINFO_ARCH_ASMJS
#define CPUINFO_ARCH_ASMJS 0
#endif
#ifndef CPUINFO_ARCH_WASM
#define CPUINFO_ARCH_WASM 0
#endif
#ifndef CPUINFO_ARCH_WASMSIMD
#define CPUINFO_ARCH_WASMSIMD 0
#endif
#ifndef CPUINFO_ARCH_RISCV32
#define CPUINFO_ARCH_RISCV32 0
#endif
#ifndef CPUINFO_ARCH_RISCV64
#define CPUINFO_ARCH_RISCV64 0
#endif
#if CPUINFO_ARCH_X86 && defined(_MSC_VER)
#define CPUINFO_ABI __cdecl
#elif CPUINFO_ARCH_X86 && defined(__GNUC__)
#define CPUINFO_ABI __attribute__((__cdecl__))
#else
#define CPUINFO_ABI
#endif
#define CPUINFO_CACHE_UNIFIED 0x00000001
#define CPUINFO_CACHE_INCLUSIVE 0x00000002
#define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004
struct cpuinfo_cache {
/** Cache size in bytes */
uint32_t size;
/** Number of ways of associativity */
uint32_t associativity;
/** Number of sets */
uint32_t sets;
/** Number of partitions */
uint32_t partitions;
/** Line size in bytes */
uint32_t line_size;
/**
* Binary characteristics of the cache (unified cache, inclusive cache,
* cache with complex indexing).
*
* @see CPUINFO_CACHE_UNIFIED, CPUINFO_CACHE_INCLUSIVE,
* CPUINFO_CACHE_COMPLEX_INDEXING
*/
uint32_t flags;
/** Index of the first logical processor that shares this cache */
uint32_t processor_start;
/** Number of logical processors that share this cache */
uint32_t processor_count;
};
struct cpuinfo_trace_cache {
uint32_t uops;
uint32_t associativity;
};
#define CPUINFO_PAGE_SIZE_4KB 0x1000
#define CPUINFO_PAGE_SIZE_1MB 0x100000
#define CPUINFO_PAGE_SIZE_2MB 0x200000
#define CPUINFO_PAGE_SIZE_4MB 0x400000
#define CPUINFO_PAGE_SIZE_16MB 0x1000000
#define CPUINFO_PAGE_SIZE_1GB 0x40000000
struct cpuinfo_tlb {
uint32_t entries;
uint32_t associativity;
uint64_t pages;
};
/** Vendor of processor core design */
enum cpuinfo_vendor {
/** Processor vendor is not known to the library, or the library failed
to get vendor information from the OS. */
cpuinfo_vendor_unknown = 0,
/* Active vendors of modern CPUs */
/**
* Intel Corporation. Vendor of x86, x86-64, IA64, and ARM processor
* microarchitectures.
*
* Sold its ARM design subsidiary in 2006. The last ARM processor design
* was released in 2004.
*/
cpuinfo_vendor_intel = 1,
/** Advanced Micro Devices, Inc. Vendor of x86 and x86-64 processor
microarchitectures. */
cpuinfo_vendor_amd = 2,
/** ARM Holdings plc. Vendor of ARM and ARM64 processor
microarchitectures. */
cpuinfo_vendor_arm = 3,
/** Qualcomm Incorporated. Vendor of ARM and ARM64 processor
microarchitectures. */
cpuinfo_vendor_qualcomm = 4,
/** Apple Inc. Vendor of ARM and ARM64 processor microarchitectures. */
cpuinfo_vendor_apple = 5,
/** Samsung Electronics Co., Ltd. Vendor of ARM64 processor
   microarchitectures. */
cpuinfo_vendor_samsung = 6,
/** Nvidia Corporation. Vendor of ARM64-compatible processor
microarchitectures. */
cpuinfo_vendor_nvidia = 7,
/** MIPS Technologies, Inc. Vendor of MIPS processor microarchitectures.
*/
cpuinfo_vendor_mips = 8,
/** International Business Machines Corporation. Vendor of PowerPC
processor microarchitectures. */
cpuinfo_vendor_ibm = 9,
/** Ingenic Semiconductor. Vendor of MIPS processor microarchitectures.
*/
cpuinfo_vendor_ingenic = 10,
/**
* VIA Technologies, Inc. Vendor of x86 and x86-64 processor
* microarchitectures.
*
* Processors are designed by Centaur Technology, a subsidiary of VIA
* Technologies.
*/
cpuinfo_vendor_via = 11,
/** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */
cpuinfo_vendor_cavium = 12,
/** Broadcom, Inc. Vendor of ARM processor microarchitectures. */
cpuinfo_vendor_broadcom = 13,
/** Applied Micro Circuits Corporation (APM). Vendor of ARM64 processor
microarchitectures. */
cpuinfo_vendor_apm = 14,
/**
* Huawei Technologies Co., Ltd. Vendor of ARM64 processor
* microarchitectures.
*
* Processors are designed by HiSilicon, a subsidiary of Huawei.
*/
cpuinfo_vendor_huawei = 15,
/**
* Hygon (Chengdu Haiguang Integrated Circuit Design Co., Ltd), Vendor
* of x86-64 processor microarchitectures.
*
* Processors are variants of AMD cores.
*/
cpuinfo_vendor_hygon = 16,
/** SiFive, Inc. Vendor of RISC-V processor microarchitectures. */
cpuinfo_vendor_sifive = 17,
/* Active vendors of embedded CPUs */
/** Texas Instruments Inc. Vendor of ARM processor microarchitectures.
*/
cpuinfo_vendor_texas_instruments = 30,
/** Marvell Technology Group Ltd. Vendor of ARM processor
* microarchitectures.
*/
cpuinfo_vendor_marvell = 31,
/** RDC Semiconductor Co., Ltd. Vendor of x86 processor
microarchitectures. */
cpuinfo_vendor_rdc = 32,
/** DM&P Electronics Inc. Vendor of x86 processor microarchitectures. */
cpuinfo_vendor_dmp = 33,
/** Motorola, Inc. Vendor of PowerPC and ARM processor
microarchitectures. */
cpuinfo_vendor_motorola = 34,
/* Defunct CPU vendors */
/**
* Transmeta Corporation. Vendor of x86 processor microarchitectures.
*
* Now defunct. The last processor design was released in 2004.
* Transmeta processors implemented VLIW ISA and used binary translation
* to execute x86 code.
*/
cpuinfo_vendor_transmeta = 50,
/**
* Cyrix Corporation. Vendor of x86 processor microarchitectures.
*
* Now defunct. The last processor design was released in 1996.
*/
cpuinfo_vendor_cyrix = 51,
/**
* Rise Technology. Vendor of x86 processor microarchitectures.
*
* Now defunct. The last processor design was released in 1999.
*/
cpuinfo_vendor_rise = 52,
/**
* National Semiconductor. Vendor of x86 processor microarchitectures.
*
* Sold its x86 design subsidiary in 1999. The last processor design was
* released in 1998.
*/
cpuinfo_vendor_nsc = 53,
/**
* Silicon Integrated Systems. Vendor of x86 processor
* microarchitectures.
*
* Sold its x86 design subsidiary in 2001. The last processor design was
* released in 2001.
*/
cpuinfo_vendor_sis = 54,
/**
* NexGen. Vendor of x86 processor microarchitectures.
*
* Now defunct. The last processor design was released in 1994.
* NexGen designed the first x86 microarchitecture which decomposed x86
* instructions into simple microoperations.
*/
cpuinfo_vendor_nexgen = 55,
/**
* United Microelectronics Corporation. Vendor of x86 processor
* microarchitectures.
*
 * Ceased x86 development in the early 1990s. The last processor design
 * was released in 1991. Designed the U5C and U5D processors, both 486-level.
*/
cpuinfo_vendor_umc = 56,
/**
* Digital Equipment Corporation. Vendor of ARM processor
* microarchitecture.
*
* Sold its ARM designs in 1997. The last processor design was released
* in 1997.
*/
cpuinfo_vendor_dec = 57,
};
/**
* Processor microarchitecture
*
* Processors with different microarchitectures often have different instruction
* performance characteristics, and may have dramatically different pipeline
* organization.
*/
enum cpuinfo_uarch {
/** Microarchitecture is unknown, or the library failed to get
information about the microarchitecture from the OS. */
cpuinfo_uarch_unknown = 0,
/** Pentium and Pentium MMX microarchitecture. */
cpuinfo_uarch_p5 = 0x00100100,
/** Intel Quark microarchitecture. */
cpuinfo_uarch_quark = 0x00100101,
/** Pentium Pro, Pentium II, and Pentium III. */
cpuinfo_uarch_p6 = 0x00100200,
/** Pentium M. */
cpuinfo_uarch_dothan = 0x00100201,
/** Intel Core microarchitecture. */
cpuinfo_uarch_yonah = 0x00100202,
/** Intel Core 2 microarchitecture on 65 nm process. */
cpuinfo_uarch_conroe = 0x00100203,
/** Intel Core 2 microarchitecture on 45 nm process. */
cpuinfo_uarch_penryn = 0x00100204,
/** Intel Nehalem and Westmere microarchitectures (Core i3/i5/i7 1st
gen). */
cpuinfo_uarch_nehalem = 0x00100205,
/** Intel Sandy Bridge microarchitecture (Core i3/i5/i7 2nd gen). */
cpuinfo_uarch_sandy_bridge = 0x00100206,
/** Intel Ivy Bridge microarchitecture (Core i3/i5/i7 3rd gen). */
cpuinfo_uarch_ivy_bridge = 0x00100207,
/** Intel Haswell microarchitecture (Core i3/i5/i7 4th gen). */
cpuinfo_uarch_haswell = 0x00100208,
/** Intel Broadwell microarchitecture. */
cpuinfo_uarch_broadwell = 0x00100209,
/** Intel Sky Lake microarchitecture (14 nm, including
Kaby/Coffee/Whiskey/Amber/Comet/Cascade/Cooper Lake). */
cpuinfo_uarch_sky_lake = 0x0010020A,
/** DEPRECATED (Intel Kaby Lake microarchitecture). */
cpuinfo_uarch_kaby_lake = 0x0010020A,
/** Intel Palm Cove microarchitecture (10 nm, Cannon Lake). */
cpuinfo_uarch_palm_cove = 0x0010020B,
/** Intel Sunny Cove microarchitecture (10 nm, Ice Lake). */
cpuinfo_uarch_sunny_cove = 0x0010020C,
/** Pentium 4 with Willamette, Northwood, or Foster cores. */
cpuinfo_uarch_willamette = 0x00100300,
/** Pentium 4 with Prescott and later cores. */
cpuinfo_uarch_prescott = 0x00100301,
/** Intel Atom on 45 nm process. */
cpuinfo_uarch_bonnell = 0x00100400,
/** Intel Atom on 32 nm process. */
cpuinfo_uarch_saltwell = 0x00100401,
/** Intel Silvermont microarchitecture (22 nm out-of-order Atom). */
cpuinfo_uarch_silvermont = 0x00100402,
/** Intel Airmont microarchitecture (14 nm out-of-order Atom). */
cpuinfo_uarch_airmont = 0x00100403,
/** Intel Goldmont microarchitecture (Denverton, Apollo Lake). */
cpuinfo_uarch_goldmont = 0x00100404,
/** Intel Goldmont Plus microarchitecture (Gemini Lake). */
cpuinfo_uarch_goldmont_plus = 0x00100405,
/** Intel Knights Ferry HPC boards. */
cpuinfo_uarch_knights_ferry = 0x00100500,
/** Intel Knights Corner HPC boards (aka Xeon Phi). */
cpuinfo_uarch_knights_corner = 0x00100501,
/** Intel Knights Landing microarchitecture (second-gen MIC). */
cpuinfo_uarch_knights_landing = 0x00100502,
/** Intel Knights Hill microarchitecture (third-gen MIC). */
cpuinfo_uarch_knights_hill = 0x00100503,
/** Intel Knights Mill Xeon Phi. */
cpuinfo_uarch_knights_mill = 0x00100504,
/** Intel/Marvell XScale series. */
cpuinfo_uarch_xscale = 0x00100600,
/** AMD K5. */
cpuinfo_uarch_k5 = 0x00200100,
/** AMD K6 and alike. */
cpuinfo_uarch_k6 = 0x00200101,
/** AMD Athlon and Duron. */
cpuinfo_uarch_k7 = 0x00200102,
/** AMD Athlon 64, Opteron 64. */
cpuinfo_uarch_k8 = 0x00200103,
/** AMD Family 10h (Barcelona, Istanbul, Magny-Cours). */
cpuinfo_uarch_k10 = 0x00200104,
/**
* AMD Bulldozer microarchitecture
* Zambezi FX-series CPUs, Zurich, Valencia and Interlagos Opteron CPUs.
*/
cpuinfo_uarch_bulldozer = 0x00200105,
/**
* AMD Piledriver microarchitecture
* Vishera FX-series CPUs, Trinity and Richland APUs, Delhi, Seoul, Abu
* Dhabi Opteron CPUs.
*/
cpuinfo_uarch_piledriver = 0x00200106,
/** AMD Steamroller microarchitecture (Kaveri APUs). */
cpuinfo_uarch_steamroller = 0x00200107,
/** AMD Excavator microarchitecture (Carrizo APUs). */
cpuinfo_uarch_excavator = 0x00200108,
/** AMD Zen microarchitecture (12/14 nm Ryzen and EPYC CPUs). */
cpuinfo_uarch_zen = 0x00200109,
/** AMD Zen 2 microarchitecture (7 nm Ryzen and EPYC CPUs). */
cpuinfo_uarch_zen2 = 0x0020010A,
/** AMD Zen 3 microarchitecture. */
cpuinfo_uarch_zen3 = 0x0020010B,
/** AMD Zen 4 microarchitecture. */
cpuinfo_uarch_zen4 = 0x0020010C,
/** NSC Geode and AMD Geode GX and LX. */
cpuinfo_uarch_geode = 0x00200200,
/** AMD Bobcat mobile microarchitecture. */
cpuinfo_uarch_bobcat = 0x00200201,
/** AMD Jaguar mobile microarchitecture. */
cpuinfo_uarch_jaguar = 0x00200202,
/** AMD Puma mobile microarchitecture. */
cpuinfo_uarch_puma = 0x00200203,
/** ARM7 series. */
cpuinfo_uarch_arm7 = 0x00300100,
/** ARM9 series. */
cpuinfo_uarch_arm9 = 0x00300101,
/** ARM 1136, ARM 1156, ARM 1176, or ARM 11MPCore. */
cpuinfo_uarch_arm11 = 0x00300102,
/** ARM Cortex-A5. */
cpuinfo_uarch_cortex_a5 = 0x00300205,
/** ARM Cortex-A7. */
cpuinfo_uarch_cortex_a7 = 0x00300207,
/** ARM Cortex-A8. */
cpuinfo_uarch_cortex_a8 = 0x00300208,
/** ARM Cortex-A9. */
cpuinfo_uarch_cortex_a9 = 0x00300209,
/** ARM Cortex-A12. */
cpuinfo_uarch_cortex_a12 = 0x00300212,
/** ARM Cortex-A15. */
cpuinfo_uarch_cortex_a15 = 0x00300215,
/** ARM Cortex-A17. */
cpuinfo_uarch_cortex_a17 = 0x00300217,
/** ARM Cortex-A32. */
cpuinfo_uarch_cortex_a32 = 0x00300332,
/** ARM Cortex-A35. */
cpuinfo_uarch_cortex_a35 = 0x00300335,
/** ARM Cortex-A53. */
cpuinfo_uarch_cortex_a53 = 0x00300353,
/** ARM Cortex-A55 revision 0 (restricted dual-issue capabilities
compared to revision 1+). */
cpuinfo_uarch_cortex_a55r0 = 0x00300354,
/** ARM Cortex-A55. */
cpuinfo_uarch_cortex_a55 = 0x00300355,
/** ARM Cortex-A57. */
cpuinfo_uarch_cortex_a57 = 0x00300357,
/** ARM Cortex-A65. */
cpuinfo_uarch_cortex_a65 = 0x00300365,
/** ARM Cortex-A72. */
cpuinfo_uarch_cortex_a72 = 0x00300372,
/** ARM Cortex-A73. */
cpuinfo_uarch_cortex_a73 = 0x00300373,
/** ARM Cortex-A75. */
cpuinfo_uarch_cortex_a75 = 0x00300375,
/** ARM Cortex-A76. */
cpuinfo_uarch_cortex_a76 = 0x00300376,
/** ARM Cortex-A77. */
cpuinfo_uarch_cortex_a77 = 0x00300377,
/** ARM Cortex-A78. */
cpuinfo_uarch_cortex_a78 = 0x00300378,
/** ARM Neoverse N1. */
cpuinfo_uarch_neoverse_n1 = 0x00300400,
/** ARM Neoverse E1. */
cpuinfo_uarch_neoverse_e1 = 0x00300401,
/** ARM Neoverse V1. */
cpuinfo_uarch_neoverse_v1 = 0x00300402,
/** ARM Neoverse N2. */
cpuinfo_uarch_neoverse_n2 = 0x00300403,
/** ARM Neoverse V2. */
cpuinfo_uarch_neoverse_v2 = 0x00300404,
/** ARM Cortex-X1. */
cpuinfo_uarch_cortex_x1 = 0x00300501,
/** ARM Cortex-X2. */
cpuinfo_uarch_cortex_x2 = 0x00300502,
/** ARM Cortex-X3. */
cpuinfo_uarch_cortex_x3 = 0x00300503,
/** ARM Cortex-X4. */
cpuinfo_uarch_cortex_x4 = 0x00300504,
/** ARM Cortex-A510. */
cpuinfo_uarch_cortex_a510 = 0x00300551,
/** ARM Cortex-A520. */
cpuinfo_uarch_cortex_a520 = 0x00300552,
/** ARM Cortex-A710. */
cpuinfo_uarch_cortex_a710 = 0x00300571,
/** ARM Cortex-A715. */
cpuinfo_uarch_cortex_a715 = 0x00300572,
/** ARM Cortex-A720. */
cpuinfo_uarch_cortex_a720 = 0x00300573,
/** Qualcomm Scorpion. */
cpuinfo_uarch_scorpion = 0x00400100,
/** Qualcomm Krait. */
cpuinfo_uarch_krait = 0x00400101,
/** Qualcomm Kryo. */
cpuinfo_uarch_kryo = 0x00400102,
/** Qualcomm Falkor. */
cpuinfo_uarch_falkor = 0x00400103,
/** Qualcomm Saphira. */
cpuinfo_uarch_saphira = 0x00400104,
/** Nvidia Denver. */
cpuinfo_uarch_denver = 0x00500100,
/** Nvidia Denver 2. */
cpuinfo_uarch_denver2 = 0x00500101,
/** Nvidia Carmel. */
cpuinfo_uarch_carmel = 0x00500102,
/** Samsung Exynos M1 (Exynos 8890 big cores). */
cpuinfo_uarch_exynos_m1 = 0x00600100,
/** Samsung Exynos M2 (Exynos 8895 big cores). */
cpuinfo_uarch_exynos_m2 = 0x00600101,
/** Samsung Exynos M3 (Exynos 9810 big cores). */
cpuinfo_uarch_exynos_m3 = 0x00600102,
/** Samsung Exynos M4 (Exynos 9820 big cores). */
cpuinfo_uarch_exynos_m4 = 0x00600103,
/** Samsung Exynos M5 (Exynos 9830 big cores). */
cpuinfo_uarch_exynos_m5 = 0x00600104,
/* Deprecated synonym for Cortex-A76 */
cpuinfo_uarch_cortex_a76ae = 0x00300376,
/* Deprecated names for Exynos. */
cpuinfo_uarch_mongoose_m1 = 0x00600100,
cpuinfo_uarch_mongoose_m2 = 0x00600101,
cpuinfo_uarch_meerkat_m3 = 0x00600102,
cpuinfo_uarch_meerkat_m4 = 0x00600103,
/** Apple A6 and A6X processors. */
cpuinfo_uarch_swift = 0x00700100,
/** Apple A7 processor. */
cpuinfo_uarch_cyclone = 0x00700101,
/** Apple A8 and A8X processor. */
cpuinfo_uarch_typhoon = 0x00700102,
/** Apple A9 and A9X processor. */
cpuinfo_uarch_twister = 0x00700103,
/** Apple A10 and A10X processor. */
cpuinfo_uarch_hurricane = 0x00700104,
/** Apple A11 processor (big cores). */
cpuinfo_uarch_monsoon = 0x00700105,
/** Apple A11 processor (little cores). */
cpuinfo_uarch_mistral = 0x00700106,
/** Apple A12 processor (big cores). */
cpuinfo_uarch_vortex = 0x00700107,
/** Apple A12 processor (little cores). */
cpuinfo_uarch_tempest = 0x00700108,
/** Apple A13 processor (big cores). */
cpuinfo_uarch_lightning = 0x00700109,
/** Apple A13 processor (little cores). */
cpuinfo_uarch_thunder = 0x0070010A,
/** Apple A14 / M1 processor (big cores). */
cpuinfo_uarch_firestorm = 0x0070010B,
/** Apple A14 / M1 processor (little cores). */
cpuinfo_uarch_icestorm = 0x0070010C,
/** Apple A15 / M2 processor (big cores). */
cpuinfo_uarch_avalanche = 0x0070010D,
/** Apple A15 / M2 processor (little cores). */
cpuinfo_uarch_blizzard = 0x0070010E,
/** Cavium ThunderX. */
cpuinfo_uarch_thunderx = 0x00800100,
/** Cavium ThunderX2 (originally Broadcom Vulcan). */
cpuinfo_uarch_thunderx2 = 0x00800200,
/** Marvell PJ4. */
cpuinfo_uarch_pj4 = 0x00900100,
/** Broadcom Brahma B15. */
cpuinfo_uarch_brahma_b15 = 0x00A00100,
/** Broadcom Brahma B53. */
cpuinfo_uarch_brahma_b53 = 0x00A00101,
/** Applied Micro X-Gene. */
cpuinfo_uarch_xgene = 0x00B00100,
/* Hygon Dhyana (a modification of AMD Zen for Chinese market). */
cpuinfo_uarch_dhyana = 0x01000100,
/** HiSilicon TaiShan v110 (Huawei Kunpeng 920 series processors). */
cpuinfo_uarch_taishan_v110 = 0x00C00100,
};
struct cpuinfo_processor {
/** SMT (hyperthread) ID within a core */
uint32_t smt_id;
/** Core containing this logical processor */
const struct cpuinfo_core* core;
/** Cluster of cores containing this logical processor */
const struct cpuinfo_cluster* cluster;
/** Physical package containing this logical processor */
const struct cpuinfo_package* package;
#if defined(__linux__)
/**
* Linux-specific ID for the logical processor:
* - Linux kernel exposes information about this logical processor in
* /sys/devices/system/cpu/cpu<linux_id>/
* - Bit <linux_id> in the cpu_set_t identifies this logical processor
*/
int linux_id;
#endif
#if defined(_WIN32) || defined(__CYGWIN__)
/** Windows-specific ID for the group containing the logical processor.
*/
uint16_t windows_group_id;
/**
* Windows-specific ID of the logical processor within its group:
* - Bit <windows_processor_id> in the KAFFINITY mask identifies this
* logical processor within its group.
*/
uint16_t windows_processor_id;
#endif
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
/** APIC ID (unique x86-specific ID of the logical processor) */
uint32_t apic_id;
#endif
struct {
/** Level 1 instruction cache */
const struct cpuinfo_cache* l1i;
/** Level 1 data cache */
const struct cpuinfo_cache* l1d;
/** Level 2 unified or data cache */
const struct cpuinfo_cache* l2;
/** Level 3 unified or data cache */
const struct cpuinfo_cache* l3;
/** Level 4 unified or data cache */
const struct cpuinfo_cache* l4;
} cache;
};
struct cpuinfo_core {
/** Index of the first logical processor on this core. */
uint32_t processor_start;
/** Number of logical processors on this core */
uint32_t processor_count;
/** Core ID within a package */
uint32_t core_id;
/** Cluster containing this core */
const struct cpuinfo_cluster* cluster;
/** Physical package containing this core. */
const struct cpuinfo_package* package;
/** Vendor of the CPU microarchitecture for this core */
enum cpuinfo_vendor vendor;
/** CPU microarchitecture for this core */
enum cpuinfo_uarch uarch;
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
/** Value of CPUID leaf 1 EAX register for this core */
uint32_t cpuid;
#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
/** Value of Main ID Register (MIDR) for this core */
uint32_t midr;
#endif
/** Clock rate (non-Turbo) of the core, in Hz */
uint64_t frequency;
};
struct cpuinfo_cluster {
/** Index of the first logical processor in the cluster */
uint32_t processor_start;
/** Number of logical processors in the cluster */
uint32_t processor_count;
/** Index of the first core in the cluster */
uint32_t core_start;
/** Number of cores on the cluster */
uint32_t core_count;
/** Cluster ID within a package */
uint32_t cluster_id;
/** Physical package containing the cluster */
const struct cpuinfo_package* package;
/** CPU microarchitecture vendor of the cores in the cluster */
enum cpuinfo_vendor vendor;
/** CPU microarchitecture of the cores in the cluster */
enum cpuinfo_uarch uarch;
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
/** Value of CPUID leaf 1 EAX register of the cores in the cluster */
uint32_t cpuid;
#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
/** Value of Main ID Register (MIDR) of the cores in the cluster */
uint32_t midr;
#endif
/** Clock rate (non-Turbo) of the cores in the cluster, in Hz */
uint64_t frequency;
};
#define CPUINFO_PACKAGE_NAME_MAX 48
struct cpuinfo_package {
/** SoC or processor chip model name */
char name[CPUINFO_PACKAGE_NAME_MAX];
/** Index of the first logical processor on this physical package */
uint32_t processor_start;
/** Number of logical processors on this physical package */
uint32_t processor_count;
/** Index of the first core on this physical package */
uint32_t core_start;
/** Number of cores on this physical package */
uint32_t core_count;
/** Index of the first cluster of cores on this physical package */
uint32_t cluster_start;
/** Number of clusters of cores on this physical package */
uint32_t cluster_count;
};
struct cpuinfo_uarch_info {
/** Type of CPU microarchitecture */
enum cpuinfo_uarch uarch;
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
/** Value of CPUID leaf 1 EAX register for the microarchitecture */
uint32_t cpuid;
#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
/** Value of Main ID Register (MIDR) for the microarchitecture */
uint32_t midr;
#endif
/** Number of logical processors with the microarchitecture */
uint32_t processor_count;
/** Number of cores with the microarchitecture */
uint32_t core_count;
};
#ifdef __cplusplus
extern "C" {
#endif
bool CPUINFO_ABI cpuinfo_initialize(void);
void CPUINFO_ABI cpuinfo_deinitialize(void);
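/*
 * Illustrative usage sketch (not part of this header): cpuinfo must be
 * initialized before any query; the printed counts come from the accessors
 * declared further below.
 *
 *   #include <stdio.h>
 *   #include <cpuinfo.h>
 *
 *   int main(void) {
 *       if (!cpuinfo_initialize()) {
 *           fprintf(stderr, "failed to initialize cpuinfo\n");
 *           return 1;
 *       }
 *       printf("cores: %u, logical processors: %u\n",
 *              cpuinfo_get_cores_count(), cpuinfo_get_processors_count());
 *       cpuinfo_deinitialize();
 *       return 0;
 *   }
 */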
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
/* This structure is not part of the stable API. Use cpuinfo_has_x86_* functions
* instead. */
struct cpuinfo_x86_isa {
#if CPUINFO_ARCH_X86
bool rdtsc;
#endif
bool rdtscp;
bool rdpid;
bool sysenter;
#if CPUINFO_ARCH_X86
bool syscall;
#endif
bool msr;
bool clzero;
bool clflush;
bool clflushopt;
bool mwait;
bool mwaitx;
#if CPUINFO_ARCH_X86
bool emmx;
#endif
bool fxsave;
bool xsave;
#if CPUINFO_ARCH_X86
bool fpu;
bool mmx;
bool mmx_plus;
#endif
bool three_d_now;
bool three_d_now_plus;
#if CPUINFO_ARCH_X86
bool three_d_now_geode;
#endif
bool prefetch;
bool prefetchw;
bool prefetchwt1;
#if CPUINFO_ARCH_X86
bool daz;
bool sse;
bool sse2;
#endif
bool sse3;
bool ssse3;
bool sse4_1;
bool sse4_2;
bool sse4a;
bool misaligned_sse;
bool avx;
bool avxvnni;
bool fma3;
bool fma4;
bool xop;
bool f16c;
bool avx2;
bool avx512f;
bool avx512pf;
bool avx512er;
bool avx512cd;
bool avx512dq;
bool avx512bw;
bool avx512vl;
bool avx512ifma;
bool avx512vbmi;
bool avx512vbmi2;
bool avx512bitalg;
bool avx512vpopcntdq;
bool avx512vnni;
bool avx512bf16;
bool avx512fp16;
bool avx512vp2intersect;
bool avx512_4vnniw;
bool avx512_4fmaps;
bool amx_bf16;
bool amx_tile;
bool amx_int8;
bool amx_fp16;
bool avx_vnni_int8;
bool avx_vnni_int16;
bool avx_ne_convert;
bool hle;
bool rtm;
bool xtest;
bool mpx;
#if CPUINFO_ARCH_X86
bool cmov;
bool cmpxchg8b;
#endif
bool cmpxchg16b;
bool clwb;
bool movbe;
#if CPUINFO_ARCH_X86_64
bool lahf_sahf;
#endif
bool fs_gs_base;
bool lzcnt;
bool popcnt;
bool tbm;
bool bmi;
bool bmi2;
bool adx;
bool aes;
bool vaes;
bool pclmulqdq;
bool vpclmulqdq;
bool gfni;
bool rdrand;
bool rdseed;
bool sha;
bool rng;
bool ace;
bool ace2;
bool phe;
bool pmm;
bool lwp;
};
extern struct cpuinfo_x86_isa cpuinfo_isa;
#endif
static inline bool cpuinfo_has_x86_rdtsc(void) {
#if CPUINFO_ARCH_X86_64
return true;
#elif CPUINFO_ARCH_X86
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.rdtsc;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_rdtscp(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.rdtscp;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_rdpid(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.rdpid;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_clzero(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.clzero;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_mwait(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.mwait;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_mwaitx(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.mwaitx;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_fxsave(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.fxsave;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_xsave(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.xsave;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_fpu(void) {
#if CPUINFO_ARCH_X86_64
return true;
#elif CPUINFO_ARCH_X86
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.fpu;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_mmx(void) {
#if CPUINFO_ARCH_X86_64
return true;
#elif CPUINFO_ARCH_X86
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.mmx;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_mmx_plus(void) {
#if CPUINFO_ARCH_X86_64
return true;
#elif CPUINFO_ARCH_X86
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.mmx_plus;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_3dnow(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.three_d_now;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_3dnow_plus(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.three_d_now_plus;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_3dnow_geode(void) {
#if CPUINFO_ARCH_X86_64
return false;
#elif CPUINFO_ARCH_X86
#if defined(__ANDROID__)
return false;
#else
return cpuinfo_isa.three_d_now_geode;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_prefetch(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.prefetch;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_prefetchw(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.prefetchw;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_prefetchwt1(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.prefetchwt1;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_daz(void) {
#if CPUINFO_ARCH_X86_64
return true;
#elif CPUINFO_ARCH_X86
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.daz;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_sse(void) {
#if CPUINFO_ARCH_X86_64
return true;
#elif CPUINFO_ARCH_X86
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.sse;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_sse2(void) {
#if CPUINFO_ARCH_X86_64
return true;
#elif CPUINFO_ARCH_X86
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.sse2;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_sse3(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.sse3;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_ssse3(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.ssse3;
#endif
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_sse4_1(void) {
#if CPUINFO_ARCH_X86_64
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.sse4_1;
#endif
#elif CPUINFO_ARCH_X86
return cpuinfo_isa.sse4_1;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_sse4_2(void) {
#if CPUINFO_ARCH_X86_64
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.sse4_2;
#endif
#elif CPUINFO_ARCH_X86
return cpuinfo_isa.sse4_2;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_sse4a(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.sse4a;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_misaligned_sse(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.misaligned_sse;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avxvnni(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avxvnni;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_fma3(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.fma3;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_fma4(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.fma4;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_xop(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.xop;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_f16c(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.f16c;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx2(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx2;
#else
return false;
#endif
}
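/*
 * Illustrative sketch (not part of this header): a typical runtime-dispatch
 * pattern keyed on the query above; gemm_avx2 and gemm_scalar are
 * hypothetical kernels, not functions provided by cpuinfo.
 *
 *   typedef void (*gemm_fn)(const float*, const float*, float*, int);
 *   static gemm_fn select_gemm(void) {
 *       return cpuinfo_has_x86_avx2() ? gemm_avx2 : gemm_scalar;
 *   }
 */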
static inline bool cpuinfo_has_x86_avx512f(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512f;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512pf(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512pf;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512er(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512er;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512cd(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512cd;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512dq(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512dq;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512bw(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512bw;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512vl(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512vl;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512ifma(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512ifma;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512vbmi(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512vbmi;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512vbmi2(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512vbmi2;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512bitalg(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512bitalg;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512vpopcntdq;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512vnni(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512vnni;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512bf16(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512bf16;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512fp16(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512fp16;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512vp2intersect(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512vp2intersect;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512_4vnniw(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512_4vnniw;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_avx512_4fmaps(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx512_4fmaps;
#else
return false;
#endif
}
/* [NOTE] Intel Advanced Matrix Extensions (AMX) detection
 *
 * I. AMX is an extension to the x86 ISA for working on matrices, consisting of
 *   1) 2-dimensional registers (tiles), which hold sub-matrices from larger matrices in memory
 *   2) an accelerator called Tile Matrix Multiply (TMUL), which contains instructions operating on tiles
 *
 * II. Platforms that support AMX:
* +-----------------+-----+----------+----------+----------+----------+
* | Platforms | Gen | amx-bf16 | amx-tile | amx-int8 | amx-fp16 |
* +-----------------+-----+----------+----------+----------+----------+
* | Sapphire Rapids | 4th | YES | YES | YES | NO |
* +-----------------+-----+----------+----------+----------+----------+
* | Emerald Rapids | 5th | YES | YES | YES | NO |
* +-----------------+-----+----------+----------+----------+----------+
* | Granite Rapids | 6th | YES | YES | YES | YES |
* +-----------------+-----+----------+----------+----------+----------+
*
* Reference: https://www.intel.com/content/www/us/en/products/docs
* /accelerator-engines/advanced-matrix-extensions/overview.html
*/
static inline bool cpuinfo_has_x86_amx_bf16(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.amx_bf16;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_amx_tile(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.amx_tile;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_amx_int8(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.amx_int8;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_amx_fp16(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.amx_fp16;
#else
return false;
#endif
}
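/*
 * Illustrative sketch (not part of this header): per the platform table
 * above, an int8 matmul path would check both the tile and int8 AMX
 * features before taking the accelerated route; matmul_amx_int8 and
 * matmul_generic are hypothetical.
 *
 *   if (cpuinfo_has_x86_amx_tile() && cpuinfo_has_x86_amx_int8()) {
 *       matmul_amx_int8(a, b, c, m, n, k);
 *   } else {
 *       matmul_generic(a, b, c, m, n, k);
 *   }
 */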
/*
* Intel AVX Vector Neural Network Instructions (VNNI) INT8
 * Supported Platforms: Sierra Forest, Arrow Lake, Lunar Lake
*/
static inline bool cpuinfo_has_x86_avx_vnni_int8(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx_vnni_int8;
#else
return false;
#endif
}
/*
* Intel AVX Vector Neural Network Instructions (VNNI) INT16
 * Supported Platforms: Arrow Lake, Lunar Lake
*/
static inline bool cpuinfo_has_x86_avx_vnni_int16(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx_vnni_int16;
#else
return false;
#endif
}
/*
 * A set of instructions that convert low-precision floating-point formats
 * such as BF16/FP16 to the high-precision floating-point format FP32, and
 * convert FP32 elements to BF16. These instructions give the platform
 * improved AI capabilities and better compatibility.
*
* Supported Platforms: Sierra Forest, Arrow Lake, Lunar Lake
*/
static inline bool cpuinfo_has_x86_avx_ne_convert(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.avx_ne_convert;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_hle(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.hle;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_rtm(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.rtm;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_xtest(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.xtest;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_mpx(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.mpx;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_cmov(void) {
#if CPUINFO_ARCH_X86_64
return true;
#elif CPUINFO_ARCH_X86
return cpuinfo_isa.cmov;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_cmpxchg8b(void) {
#if CPUINFO_ARCH_X86_64
return true;
#elif CPUINFO_ARCH_X86
return cpuinfo_isa.cmpxchg8b;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_cmpxchg16b(void) {
#if CPUINFO_ARCH_X86_64
return cpuinfo_isa.cmpxchg16b;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_clwb(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.clwb;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_movbe(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.movbe;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_lahf_sahf(void) {
#if CPUINFO_ARCH_X86
return true;
#elif CPUINFO_ARCH_X86_64
return cpuinfo_isa.lahf_sahf;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_lzcnt(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.lzcnt;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_popcnt(void) {
#if CPUINFO_ARCH_X86_64
#if defined(__ANDROID__)
return true;
#else
return cpuinfo_isa.popcnt;
#endif
#elif CPUINFO_ARCH_X86
return cpuinfo_isa.popcnt;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_tbm(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.tbm;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_bmi(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.bmi;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_bmi2(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.bmi2;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_adx(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.adx;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_aes(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.aes;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_vaes(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.vaes;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_pclmulqdq(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.pclmulqdq;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_vpclmulqdq(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.vpclmulqdq;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_gfni(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.gfni;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_rdrand(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.rdrand;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_rdseed(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.rdseed;
#else
return false;
#endif
}
static inline bool cpuinfo_has_x86_sha(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
return cpuinfo_isa.sha;
#else
return false;
#endif
}
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
/* This structure is not part of the stable API. Use cpuinfo_has_arm_* functions
* instead. */
struct cpuinfo_arm_isa {
#if CPUINFO_ARCH_ARM
bool thumb;
bool thumb2;
bool thumbee;
bool jazelle;
bool armv5e;
bool armv6;
bool armv6k;
bool armv7;
bool armv7mp;
bool armv8;
bool idiv;
bool vfpv2;
bool vfpv3;
bool d32;
bool fp16;
bool fma;
bool wmmx;
bool wmmx2;
bool neon;
#endif
#if CPUINFO_ARCH_ARM64
bool atomics;
bool bf16;
bool sve;
bool sve2;
bool i8mm;
bool sme;
bool sme2;
bool sme2p1;
bool sme_i16i32;
bool sme_bi32i32;
bool sme_b16b16;
bool sme_f16f16;
uint32_t svelen;
#endif
bool rdm;
bool fp16arith;
bool dot;
bool jscvt;
bool fcma;
bool fhm;
bool aes;
bool sha1;
bool sha2;
bool pmull;
bool crc32;
};
extern struct cpuinfo_arm_isa cpuinfo_isa;
#endif
static inline bool cpuinfo_has_arm_thumb(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.thumb;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_thumb2(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.thumb2;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_v5e(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.armv5e;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_v6(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.armv6;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_v6k(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.armv6k;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_v7(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.armv7;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_v7mp(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.armv7mp;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_v8(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.armv8;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_idiv(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.idiv;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_vfpv2(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.vfpv2;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_vfpv3(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.vfpv3;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_vfpv3_d32(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.vfpv3 && cpuinfo_isa.d32;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_vfpv3_fp16(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16 && cpuinfo_isa.d32;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_vfpv4(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_vfpv4_d32(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma && cpuinfo_isa.d32;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_fp16_arith(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.fp16arith;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_bf16(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.bf16;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_wmmx(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.wmmx;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_wmmx2(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.wmmx2;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_neon(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.neon;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_neon_fp16(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.neon && cpuinfo_isa.fp16;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_neon_fma(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.neon && cpuinfo_isa.fma;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_neon_v8(void) {
#if CPUINFO_ARCH_ARM64
return true;
#elif CPUINFO_ARCH_ARM
return cpuinfo_isa.neon && cpuinfo_isa.armv8;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_atomics(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.atomics;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_neon_rdm(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.rdm;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_neon_fp16_arith(void) {
#if CPUINFO_ARCH_ARM
return cpuinfo_isa.neon && cpuinfo_isa.fp16arith;
#elif CPUINFO_ARCH_ARM64
return cpuinfo_isa.fp16arith;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_fhm(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.fhm;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_neon_dot(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.dot;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_neon_bf16(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.bf16;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_jscvt(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.jscvt;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_fcma(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.fcma;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_i8mm(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.i8mm;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_aes(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.aes;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sha1(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.sha1;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sha2(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.sha2;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_pmull(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.pmull;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_crc32(void) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
return cpuinfo_isa.crc32;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sve(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sve;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sve_bf16(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sve && cpuinfo_isa.bf16;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sve2(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sve2;
#else
return false;
#endif
}
// Returns the maximum SVE vector length (in bits) on ARM CPUs that support SVE.
static inline uint32_t cpuinfo_get_max_arm_sve_length(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.svelen * 8; // svelen is in bytes; multiply by 8 for the vector length in bits
#else
return 0;
#endif
}
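/*
 * Illustrative sketch (not part of this header): since SVE is
 * length-agnostic, callers typically derive lane counts from the reported
 * bit length instead of hard-coding a vector width; this assumes svelen is
 * zero when SVE is unavailable.
 *
 *   if (cpuinfo_has_arm_sve()) {
 *       uint32_t bits = cpuinfo_get_max_arm_sve_length();
 *       uint32_t f32_lanes = bits / 32;  // 32-bit lanes per SVE vector
 *   }
 */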
static inline bool cpuinfo_has_arm_sme(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sme;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sme2(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sme2;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sme2p1(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sme2p1;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sme_i16i32(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sme_i16i32;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sme_bi32i32(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sme_bi32i32;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sme_b16b16(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sme_b16b16;
#else
return false;
#endif
}
static inline bool cpuinfo_has_arm_sme_f16f16(void) {
#if CPUINFO_ARCH_ARM64
return cpuinfo_isa.sme_f16f16;
#else
return false;
#endif
}
#if CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
/* This structure is not part of the stable API. Use cpuinfo_has_riscv_* functions
* instead. */
struct cpuinfo_riscv_isa {
/**
* Keep fields in line with the canonical order as defined by
* Section 27.11 Subset Naming Convention.
*/
/* RV32I/64I/128I Base ISA. */
bool i;
#if CPUINFO_ARCH_RISCV32
/* RV32E Base ISA. */
bool e;
#endif
/* Integer Multiply/Divide Extension. */
bool m;
/* Atomic Extension. */
bool a;
/* Single-Precision Floating-Point Extension. */
bool f;
/* Double-Precision Floating-Point Extension. */
bool d;
/* Compressed Extension. */
bool c;
/* Vector Extension. */
bool v;
};
extern struct cpuinfo_riscv_isa cpuinfo_isa;
#endif
static inline bool cpuinfo_has_riscv_i(void) {
#if CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
return cpuinfo_isa.i;
#else
return false;
#endif
}
static inline bool cpuinfo_has_riscv_e(void) {
#if CPUINFO_ARCH_RISCV32
return cpuinfo_isa.e;
#else
return false;
#endif
}
static inline bool cpuinfo_has_riscv_m(void) {
#if CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
return cpuinfo_isa.m;
#else
return false;
#endif
}
static inline bool cpuinfo_has_riscv_a(void) {
#if CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
return cpuinfo_isa.a;
#else
return false;
#endif
}
static inline bool cpuinfo_has_riscv_f(void) {
#if CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
return cpuinfo_isa.f;
#else
return false;
#endif
}
static inline bool cpuinfo_has_riscv_d(void) {
#if CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
return cpuinfo_isa.d;
#else
return false;
#endif
}
static inline bool cpuinfo_has_riscv_g(void) {
// The 'G' extension is simply shorthand for 'IMAFD'.
return cpuinfo_has_riscv_i() && cpuinfo_has_riscv_m() && cpuinfo_has_riscv_a() && cpuinfo_has_riscv_f() &&
cpuinfo_has_riscv_d();
}
static inline bool cpuinfo_has_riscv_c(void) {
#if CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
return cpuinfo_isa.c;
#else
return false;
#endif
}
static inline bool cpuinfo_has_riscv_v(void) {
#if CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
return cpuinfo_isa.v;
#else
return false;
#endif
}
const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void);
const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void);
const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void);
const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void);
const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarchs(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void);
const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index);
const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index);
const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index);
const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index);
const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarch(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index);
uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_clusters_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_uarchs_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void);
/**
* Returns upper bound on cache size.
*/
uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void);
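/*
 * Illustrative sketch (not part of this header): a simple cache-blocking
 * heuristic that sizes a working set to a fraction of the largest cache;
 * the 1/2 factor and 256 KiB fallback are arbitrary example values.
 *
 *   uint32_t max_cache = cpuinfo_get_max_cache_size();
 *   size_t block_bytes = max_cache != 0 ? max_cache / 2 : 256 * 1024;
 */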
/**
* Identify the logical processor that executes the current thread.
*
* There is no guarantee that the thread will stay on the same logical processor
* for any time. Callers should treat the result as only a hint, and be prepared
* to handle NULL return value.
*/
const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void);
/**
* Identify the core that executes the current thread.
*
* There is no guarantee that the thread will stay on the same core for any
* time. Callers should treat the result as only a hint, and be prepared to
* handle NULL return value.
*/
const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void);
/**
* Identify the microarchitecture index of the core that executes the current
* thread. If the system does not support such identification, the function
* returns 0.
*
* There is no guarantee that the thread will stay on the same type of core for
* any time. Callers should treat the result as only a hint.
*/
uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void);
/**
* Identify the microarchitecture index of the core that executes the current
* thread. If the system does not support such identification, the function
* returns the user-specified default value.
*
* There is no guarantee that the thread will stay on the same type of core for
* any time. Callers should treat the result as only a hint.
*/
uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index);
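/*
 * Illustrative sketch (not part of this header): on heterogeneous (e.g.
 * big.LITTLE) systems the uarch index can select a per-microarchitecture
 * kernel from a table built at startup; `kernels` is a hypothetical array
 * with one entry per uarch returned by cpuinfo_get_uarchs_count().
 *
 *   uint32_t count = cpuinfo_get_uarchs_count();
 *   uint32_t index = cpuinfo_get_current_uarch_index_with_default(0);
 *   kernel_fn kernel = kernels[index < count ? index : 0];
 */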
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* CPUINFO_H */
```
|
============================================================================================================
SOURCE CODE FILE: dnnl.h
LINES: 1
SIZE: 0.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\dnnl.h
ENCODING: utf-8
```h
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef DNNL_H
#define DNNL_H
#include "oneapi/dnnl/dnnl.h"
#endif /* DNNL_H */
```
|
===================================================================================================================
SOURCE CODE FILE: dnnl_config.h
LINES: 1
SIZE: 0.86 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\dnnl_config.h
ENCODING: utf-8
```h
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef DNNL_CONFIG_H
#define DNNL_CONFIG_H
#include "oneapi/dnnl/dnnl_config.h"
#endif /* DNNL_CONFIG_H */
```
|
==================================================================================================================
SOURCE CODE FILE: dnnl_debug.h
LINES: 1
SIZE: 0.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\dnnl_debug.h
ENCODING: utf-8
```h
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef DNNL_DEBUG_H
#define DNNL_DEBUG_H
#include "oneapi/dnnl/dnnl_debug.h"
#endif /* DNNL_DEBUG_H */
```
|
================================================================================================================
SOURCE CODE FILE: dnnl_ocl.h
LINES: 1
SIZE: 0.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\dnnl_ocl.h
ENCODING: utf-8
```h
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef DNNL_OCL_H
#define DNNL_OCL_H
#include "oneapi/dnnl/dnnl_ocl.h"
#endif /* DNNL_OCL_H */
```
|
=================================================================================================================
SOURCE CODE FILE: dnnl_sycl.h
LINES: 1
SIZE: 0.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\dnnl_sycl.h
ENCODING: utf-8
```h
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef DNNL_SYCL_H
#define DNNL_SYCL_H
#include "oneapi/dnnl/dnnl_sycl.h"
#endif /* DNNL_SYCL_H */
```
|
=======================================================================================================================
SOURCE CODE FILE: dnnl_sycl_types.h
LINES: 1
SIZE: 0.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\dnnl_sycl_types.h
ENCODING: utf-8
```h
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef DNNL_SYCL_TYPES_H
#define DNNL_SYCL_TYPES_H
#include "oneapi/dnnl/dnnl_sycl_types.h"
#endif /* DNNL_SYCL_TYPES_H */
```
|
=======================================================================================================================
SOURCE CODE FILE: dnnl_threadpool.h
LINES: 1
SIZE: 0.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\dnnl_threadpool.h
ENCODING: utf-8
```h
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef DNNL_THREADPOOL_H
#define DNNL_THREADPOOL_H
#include "oneapi/dnnl/dnnl_threadpool.h"
#endif /* DNNL_THREADPOOL_H */
```
|
==================================================================================================================
SOURCE CODE FILE: dnnl_types.h
LINES: 1
SIZE: 0.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\dnnl_types.h
ENCODING: utf-8
```h
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef DNNL_TYPES_H
#define DNNL_TYPES_H
#include "oneapi/dnnl/dnnl_types.h"
#endif /* DNNL_TYPES_H */
```
|
====================================================================================================================
SOURCE CODE FILE: dnnl_version.h
LINES: 1
SIZE: 0.86 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\dnnl_version.h
ENCODING: utf-8
```h
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef DNNL_VERSION_H
#define DNNL_VERSION_H
#include "oneapi/dnnl/dnnl_version.h"
#endif /* DNNL_VERSION_H */
```
|
==========================================================================================================================
SOURCE CODE FILE: experiments-config.h
LINES: 1
SIZE: 0.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\experiments-config.h
ENCODING: utf-8
```h
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
struct xnn_experiment_config {
int dummy; // C requires that a struct or union has at least one member
};
struct xnn_experiment_config* xnn_get_experiment_config();
void xnn_experiment_enable_adaptive_avx_optimization();
#ifdef __cplusplus
} // extern "C"
#endif
```
|
============================================================================================================
SOURCE CODE FILE: fp16.h
LINES: 1
SIZE: 0.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\fp16.h
ENCODING: utf-8
```h
#pragma once
#ifndef FP16_H
#define FP16_H
#include <fp16/fp16.h>
#if defined(PSIMD_H)
#include <fp16/psimd.h>
#endif
#endif /* FP16_H */
```
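Since this header just re-exports `<fp16/fp16.h>`, a round-trip through IEEE binary16 looks like the following sketch (assuming the library's `fp16_ieee_from_fp32_value` / `fp16_ieee_to_fp32_value` helpers):
```cpp
#include <cstdint>
#include <cstdio>
#include <fp16.h>

int main() {
  // Round-trip a float through IEEE binary16 storage bits.
  const float original = 0.333333f;
  const uint16_t half = fp16_ieee_from_fp32_value(original);  // fp32 -> fp16 bits
  const float restored = fp16_ieee_to_fp32_value(half);       // fp16 bits -> fp32
  std::printf("%f -> 0x%04x -> %f\n", original, (unsigned)half, restored);
  return 0;
}
```
The restored value differs slightly from the original because binary16 keeps only 11 significand bits.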
|
=============================================================================================================
SOURCE CODE FILE: fxdiv.h
LINES: 1
SIZE: 13.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\fxdiv.h
ENCODING: utf-8
```h
#pragma once
#ifndef FXDIV_H
#define FXDIV_H
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#include <cstddef>
#include <cstdint>
#include <climits>
#elif !defined(__OPENCL_VERSION__)
#include <stddef.h>
#include <stdint.h>
#include <limits.h>
#endif
#if defined(_MSC_VER)
#include <intrin.h>
#if defined(_M_IX86) || defined(_M_X64)
#include <immintrin.h>
#endif
#endif
#ifndef FXDIV_USE_INLINE_ASSEMBLY
#define FXDIV_USE_INLINE_ASSEMBLY 0
#endif
static inline uint64_t fxdiv_mulext_uint32_t(uint32_t a, uint32_t b) {
#if defined(_MSC_VER) && defined(_M_IX86)
return (uint64_t) __emulu((unsigned int) a, (unsigned int) b);
#else
return (uint64_t) a * (uint64_t) b;
#endif
}
static inline uint32_t fxdiv_mulhi_uint32_t(uint32_t a, uint32_t b) {
#if defined(__OPENCL_VERSION__)
return mul_hi(a, b);
#elif defined(__CUDA_ARCH__)
return (uint32_t) __umulhi((unsigned int) a, (unsigned int) b);
#elif defined(_MSC_VER) && defined(_M_IX86)
return (uint32_t) (__emulu((unsigned int) a, (unsigned int) b) >> 32);
#elif defined(_MSC_VER) && defined(_M_ARM)
return (uint32_t) _MulUnsignedHigh((unsigned long) a, (unsigned long) b);
#else
return (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32);
#endif
}
static inline uint64_t fxdiv_mulhi_uint64_t(uint64_t a, uint64_t b) {
#if defined(__OPENCL_VERSION__)
return mul_hi(a, b);
#elif defined(__CUDA_ARCH__)
return (uint64_t) __umul64hi((unsigned long long) a, (unsigned long long) b);
#elif defined(_MSC_VER) && defined(_M_X64)
return (uint64_t) __umulh((unsigned __int64) a, (unsigned __int64) b);
#elif defined(__GNUC__) && defined(__SIZEOF_INT128__)
return (uint64_t) (((((unsigned __int128) a) * ((unsigned __int128) b))) >> 64);
#else
const uint32_t a_lo = (uint32_t) a;
const uint32_t a_hi = (uint32_t) (a >> 32);
const uint32_t b_lo = (uint32_t) b;
const uint32_t b_hi = (uint32_t) (b >> 32);
const uint64_t t = fxdiv_mulext_uint32_t(a_hi, b_lo) +
(uint64_t) fxdiv_mulhi_uint32_t(a_lo, b_lo);
return fxdiv_mulext_uint32_t(a_hi, b_hi) + (t >> 32) +
((fxdiv_mulext_uint32_t(a_lo, b_hi) + (uint64_t) (uint32_t) t) >> 32);
#endif
}
static inline size_t fxdiv_mulhi_size_t(size_t a, size_t b) {
#if SIZE_MAX == UINT32_MAX
return (size_t) fxdiv_mulhi_uint32_t((uint32_t) a, (uint32_t) b);
#elif SIZE_MAX == UINT64_MAX
return (size_t) fxdiv_mulhi_uint64_t((uint64_t) a, (uint64_t) b);
#else
#error Unsupported platform
#endif
}
struct fxdiv_divisor_uint32_t {
uint32_t value;
uint32_t m;
uint8_t s1;
uint8_t s2;
};
struct fxdiv_result_uint32_t {
uint32_t quotient;
uint32_t remainder;
};
struct fxdiv_divisor_uint64_t {
uint64_t value;
uint64_t m;
uint8_t s1;
uint8_t s2;
};
struct fxdiv_result_uint64_t {
uint64_t quotient;
uint64_t remainder;
};
struct fxdiv_divisor_size_t {
size_t value;
size_t m;
uint8_t s1;
uint8_t s2;
};
struct fxdiv_result_size_t {
size_t quotient;
size_t remainder;
};
static inline struct fxdiv_divisor_uint32_t fxdiv_init_uint32_t(uint32_t d) {
struct fxdiv_divisor_uint32_t result = { d };
if (d == 1) {
result.m = UINT32_C(1);
result.s1 = 0;
result.s2 = 0;
} else {
#if defined(__OPENCL_VERSION__)
const uint32_t l_minus_1 = 31 - clz(d - 1);
#elif defined(__CUDA_ARCH__)
const uint32_t l_minus_1 = 31 - __clz((int) (d - 1));
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64))
unsigned long l_minus_1;
_BitScanReverse(&l_minus_1, (unsigned long) (d - 1));
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && FXDIV_USE_INLINE_ASSEMBLY
uint32_t l_minus_1;
__asm__("BSRL %[d_minus_1], %[l_minus_1]"
: [l_minus_1] "=r" (l_minus_1)
: [d_minus_1] "r" (d - 1)
: "cc");
#elif defined(__GNUC__)
const uint32_t l_minus_1 = 31 - __builtin_clz(d - 1);
#else
/* Based on Algorithm 2 from Hacker's Delight */
uint32_t l_minus_1 = 0;
uint32_t x = d - 1;
uint32_t y = x >> 16;
if (y != 0) {
l_minus_1 += 16;
x = y;
}
y = x >> 8;
if (y != 0) {
l_minus_1 += 8;
x = y;
}
y = x >> 4;
if (y != 0) {
l_minus_1 += 4;
x = y;
}
y = x >> 2;
if (y != 0) {
l_minus_1 += 2;
x = y;
}
if ((x & 2) != 0) {
l_minus_1 += 1;
}
#endif
uint32_t u_hi = (UINT32_C(2) << (uint32_t) l_minus_1) - d;
/* Division of 64-bit number u_hi:UINT32_C(0) by 32-bit number d, 32-bit quotient output q */
#if defined(__GNUC__) && defined(__i386__) && FXDIV_USE_INLINE_ASSEMBLY
uint32_t q;
__asm__("DIVL %[d]"
: "=a" (q), "+d" (u_hi)
: [d] "r" (d), "a" (0)
: "cc");
#elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (defined(_M_IX86) || defined(_M_X64))
unsigned int remainder;
const uint32_t q = (uint32_t) _udiv64((unsigned __int64) ((uint64_t) u_hi << 32), (unsigned int) d, &remainder);
#else
const uint32_t q = ((uint64_t) u_hi << 32) / d;
#endif
result.m = q + UINT32_C(1);
result.s1 = 1;
result.s2 = (uint8_t) l_minus_1;
}
return result;
}
static inline struct fxdiv_divisor_uint64_t fxdiv_init_uint64_t(uint64_t d) {
struct fxdiv_divisor_uint64_t result = { d };
if (d == 1) {
result.m = UINT64_C(1);
result.s1 = 0;
result.s2 = 0;
} else {
#if defined(__OPENCL_VERSION__)
const uint32_t nlz_d = clz(d);
const uint32_t l_minus_1 = 63 - clz(d - 1);
#elif defined(__CUDA_ARCH__)
const uint32_t nlz_d = __clzll((long long) d);
const uint32_t l_minus_1 = 63 - __clzll((long long) (d - 1));
#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
unsigned long l_minus_1;
_BitScanReverse64(&l_minus_1, (unsigned __int64) (d - 1));
unsigned long bsr_d;
_BitScanReverse64(&bsr_d, (unsigned __int64) d);
const uint32_t nlz_d = bsr_d ^ 0x3F;
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM))
const uint64_t d_minus_1 = d - 1;
const uint8_t d_is_power_of_2 = (d & d_minus_1) == 0;
unsigned long l_minus_1;
if ((uint32_t) (d_minus_1 >> 32) == 0) {
_BitScanReverse(&l_minus_1, (unsigned long) d_minus_1);
} else {
_BitScanReverse(&l_minus_1, (unsigned long) (uint32_t) (d_minus_1 >> 32));
l_minus_1 += 32;
}
const uint32_t nlz_d = ((uint8_t) l_minus_1 ^ UINT8_C(0x3F)) - d_is_power_of_2;
#elif defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY
uint64_t l_minus_1;
__asm__("BSRQ %[d_minus_1], %[l_minus_1]"
: [l_minus_1] "=r" (l_minus_1)
: [d_minus_1] "r" (d - 1)
: "cc");
#elif defined(__GNUC__)
const uint32_t l_minus_1 = 63 - __builtin_clzll(d - 1);
const uint32_t nlz_d = __builtin_clzll(d);
#else
/* Based on Algorithm 2 from Hacker's Delight */
const uint64_t d_minus_1 = d - 1;
const uint32_t d_is_power_of_2 = (d & d_minus_1) == 0;
uint32_t l_minus_1 = 0;
uint32_t x = (uint32_t) d_minus_1;
uint32_t y = d_minus_1 >> 32;
if (y != 0) {
l_minus_1 += 32;
x = y;
}
y = x >> 16;
if (y != 0) {
l_minus_1 += 16;
x = y;
}
y = x >> 8;
if (y != 0) {
l_minus_1 += 8;
x = y;
}
y = x >> 4;
if (y != 0) {
l_minus_1 += 4;
x = y;
}
y = x >> 2;
if (y != 0) {
l_minus_1 += 2;
x = y;
}
if ((x & 2) != 0) {
l_minus_1 += 1;
}
const uint32_t nlz_d = (l_minus_1 ^ UINT32_C(0x3F)) - d_is_power_of_2;
#endif
uint64_t u_hi = (UINT64_C(2) << (uint32_t) l_minus_1) - d;
/* Division of 128-bit number u_hi:UINT64_C(0) by 64-bit number d, 64-bit quotient output q */
#if defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY
uint64_t q;
__asm__("DIVQ %[d]"
: "=a" (q), "+d" (u_hi)
: [d] "r" (d), "a" (UINT64_C(0))
: "cc");
#elif 0 && defined(__GNUC__) && defined(__SIZEOF_INT128__)
/* GCC, Clang, and Intel Compiler fail to inline optimized implementation and call into support library for 128-bit division */
const uint64_t q = (uint64_t) (((unsigned __int128) u_hi << 64) / ((unsigned __int128) d));
#elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && defined(_M_X64)
unsigned __int64 remainder;
const uint64_t q = (uint64_t) _udiv128((unsigned __int64) u_hi, 0, (unsigned __int64) d, &remainder);
#else
/* Implementation based on code from Hacker's Delight */
/* Normalize divisor and shift dividend left */
d <<= nlz_d;
u_hi <<= nlz_d;
/* Break divisor up into two 32-bit digits */
const uint64_t d_hi = (uint32_t) (d >> 32);
const uint32_t d_lo = (uint32_t) d;
/* Compute the first quotient digit, q1 */
uint64_t q1 = u_hi / d_hi;
uint64_t r1 = u_hi - q1 * d_hi;
while ((q1 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q1, d_lo) > (r1 << 32)) {
q1 -= 1;
r1 += d_hi;
if ((r1 >> 32) != 0) {
break;
}
}
/* Multiply and subtract. */
u_hi = (u_hi << 32) - q1 * d;
/* Compute the second quotient digit, q0 */
uint64_t q0 = u_hi / d_hi;
uint64_t r0 = u_hi - q0 * d_hi;
while ((q0 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q0, d_lo) > (r0 << 32)) {
q0 -= 1;
r0 += d_hi;
if ((r0 >> 32) != 0) {
break;
}
}
const uint64_t q = (q1 << 32) | (uint32_t) q0;
#endif
result.m = q + UINT64_C(1);
result.s1 = 1;
result.s2 = (uint8_t) l_minus_1;
}
return result;
}
static inline struct fxdiv_divisor_size_t fxdiv_init_size_t(size_t d) {
#if SIZE_MAX == UINT32_MAX
const struct fxdiv_divisor_uint32_t uint_result = fxdiv_init_uint32_t((uint32_t) d);
#elif SIZE_MAX == UINT64_MAX
const struct fxdiv_divisor_uint64_t uint_result = fxdiv_init_uint64_t((uint64_t) d);
#else
#error Unsupported platform
#endif
struct fxdiv_divisor_size_t size_result = {
(size_t) uint_result.value,
(size_t) uint_result.m,
uint_result.s1,
uint_result.s2
};
return size_result;
}
static inline uint32_t fxdiv_quotient_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
const uint32_t t = fxdiv_mulhi_uint32_t(n, divisor.m);
return (t + ((n - t) >> divisor.s1)) >> divisor.s2;
}
static inline uint64_t fxdiv_quotient_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
const uint64_t t = fxdiv_mulhi_uint64_t(n, divisor.m);
return (t + ((n - t) >> divisor.s1)) >> divisor.s2;
}
static inline size_t fxdiv_quotient_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
#if SIZE_MAX == UINT32_MAX
const struct fxdiv_divisor_uint32_t uint32_divisor = {
(uint32_t) divisor.value,
(uint32_t) divisor.m,
divisor.s1,
divisor.s2
};
return fxdiv_quotient_uint32_t((uint32_t) n, uint32_divisor);
#elif SIZE_MAX == UINT64_MAX
const struct fxdiv_divisor_uint64_t uint64_divisor = {
(uint64_t) divisor.value,
(uint64_t) divisor.m,
divisor.s1,
divisor.s2
};
return fxdiv_quotient_uint64_t((uint64_t) n, uint64_divisor);
#else
#error Unsupported platform
#endif
}
static inline uint32_t fxdiv_remainder_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor);
return n - quotient * divisor.value;
}
static inline uint64_t fxdiv_remainder_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
return n - quotient * divisor.value;
}
static inline size_t fxdiv_remainder_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
const size_t quotient = fxdiv_quotient_size_t(n, divisor);
return n - quotient * divisor.value;
}
static inline uint32_t fxdiv_round_down_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t granularity) {
const uint32_t quotient = fxdiv_quotient_uint32_t(n, granularity);
return quotient * granularity.value;
}
static inline uint64_t fxdiv_round_down_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t granularity) {
const uint64_t quotient = fxdiv_quotient_uint64_t(n, granularity);
return quotient * granularity.value;
}
static inline size_t fxdiv_round_down_size_t(size_t n, const struct fxdiv_divisor_size_t granularity) {
const size_t quotient = fxdiv_quotient_size_t(n, granularity);
return quotient * granularity.value;
}
static inline struct fxdiv_result_uint32_t fxdiv_divide_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor);
const uint32_t remainder = n - quotient * divisor.value;
struct fxdiv_result_uint32_t result = { quotient, remainder };
return result;
}
static inline struct fxdiv_result_uint64_t fxdiv_divide_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
const uint64_t remainder = n - quotient * divisor.value;
struct fxdiv_result_uint64_t result = { quotient, remainder };
return result;
}
static inline struct fxdiv_result_size_t fxdiv_divide_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
const size_t quotient = fxdiv_quotient_size_t(n, divisor);
const size_t remainder = n - quotient * divisor.value;
struct fxdiv_result_size_t result = { quotient, remainder };
return result;
}
#endif /* FXDIV_H */
```
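The point of the divisor structs above is amortization: `fxdiv_init_*` pays for one real division to derive the magic multiplier `m` and the shifts `s1`/`s2`, after which every quotient costs only a high multiply and two shifts (`(t + ((n - t) >> s1)) >> s2`). A minimal sketch using only the functions declared above:
```cpp
#include <cstdint>
#include <cstdio>
#include <fxdiv.h>

int main() {
  // Precompute the magic constants for division by 7 once...
  const struct fxdiv_divisor_uint32_t seven = fxdiv_init_uint32_t(7);
  // ...then divide many values without issuing a hardware DIV.
  for (uint32_t n = 100; n < 105; n++) {
    const struct fxdiv_result_uint32_t r = fxdiv_divide_uint32_t(n, seven);
    std::printf("%u = 7 * %u + %u\n", n, r.quotient, r.remainder);
  }
  return 0;
}
```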
|
=============================================================================================================================
SOURCE CODE FILE: AbstractConfig.h
LINES: 1
SIZE: 3.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\AbstractConfig.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <chrono>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace libkineto {
class AbstractConfig {
public:
AbstractConfig& operator=(const AbstractConfig&) = delete;
AbstractConfig(AbstractConfig&&) = delete;
AbstractConfig& operator=(AbstractConfig&&) = delete;
virtual ~AbstractConfig() {
for (const auto& p : featureConfigs_) {
delete p.second;
}
}
// Return a copy of the full derived class
virtual AbstractConfig* cloneDerived(AbstractConfig& parent) const = 0;
// Returns true if successfully parsed the config string
bool parse(const std::string& conf);
// Default setup for signal-triggered profiling
virtual void setSignalDefaults() {
for (auto& p : featureConfigs_) {
p.second->setSignalDefaults();
}
}
// Default setup for client-triggered profiling
virtual void setClientDefaults() {
for (auto& p : featureConfigs_) {
p.second->setClientDefaults();
}
}
// Time config was created / updated
std::chrono::time_point<std::chrono::system_clock> timestamp() const {
return timestamp_;
}
// Source config string that this was parsed from
const std::string& source() const {
return source_;
}
AbstractConfig& feature(std::string name) const {
const auto& pos = featureConfigs_.find(name);
return *pos->second;
}
// Transfers ownership of cfg arg
void addFeature(const std::string& name, AbstractConfig* cfg) {
featureConfigs_[name] = cfg;
}
protected:
AbstractConfig() {}
AbstractConfig(const AbstractConfig& other) = default;
// Return true if the option was recognized and successfully parsed.
// Throw std::invalid_argument if val is invalid.
virtual bool handleOption(const std::string& name, std::string& val);
// Perform post-validation checks, typically conditions involving
// multiple options.
// Throw std::invalid_argument if automatic correction cannot be made.
//
// @param fallbackProfileStartTime Specify a fallback profile start timestamp
// in case it was never specified by the client
virtual void validate(
const std::chrono::time_point<std::chrono::system_clock>&
fallbackProfileStartTime) = 0;
// TODO: Separate out each profiler type into features?
virtual void printActivityProfilerConfig(std::ostream& s) const;
virtual void setActivityDependentConfig();
// Helpers for use in handleOption
// Split a string by delimiter and trim surrounding whitespace
std::vector<std::string> splitAndTrim(const std::string& s, char delim) const;
// Lowercase for case-insensitive comparisons
std::string toLower(std::string& s) const;
// Does string end with suffix
bool endsWith(const std::string& s, const std::string& suffix) const;
// Conversions
int64_t toIntRange(const std::string& val, int64_t min, int64_t max) const;
int32_t toInt32(const std::string& val) const;
int64_t toInt64(const std::string& val) const;
bool toBool(std::string& val) const;
void cloneFeaturesInto(AbstractConfig& cfg) const {
for (const auto& feature : featureConfigs_) {
cfg.featureConfigs_[feature.first] = feature.second->cloneDerived(cfg);
}
}
private:
// Time config was created / updated
std::chrono::time_point<std::chrono::system_clock> timestamp_{};
// Original configuration string, used for comparison
std::string source_{""};
// Configuration objects for optional features
std::map<std::string, AbstractConfig*> featureConfigs_{};
};
} // namespace libkineto
```
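A hypothetical feature config, to show how the pieces above fit together: `cloneDerived` and `validate` are the pure virtuals a subclass must provide, `handleOption` claims the options it recognizes, and the instance is registered via `addFeature`. All names below other than the AbstractConfig members are invented for illustration:
```cpp
#include <chrono>
#include <string>
#include "AbstractConfig.h"

namespace libkineto {

class MyFeatureConfig : public AbstractConfig {
 public:
  MyFeatureConfig() = default;

  // Deep-copy support used by cloneFeaturesInto().
  AbstractConfig* cloneDerived(AbstractConfig& /*parent*/) const override {
    return new MyFeatureConfig(*this);
  }

 protected:
  MyFeatureConfig(const MyFeatureConfig&) = default;

  // Claim the single option this feature understands.
  bool handleOption(const std::string& name, std::string& val) override {
    if (name == "MY_FEATURE_ENABLED") {
      enabled_ = toBool(val);  // conversion helper inherited from AbstractConfig
      return true;
    }
    return false;  // not recognized here
  }

  void validate(const std::chrono::time_point<std::chrono::system_clock>&
                    /*fallbackProfileStartTime*/) override {
    // No cross-option constraints for this toy feature.
  }

 private:
  bool enabled_{false};
};

} // namespace libkineto
```
Ownership of the raw pointer passes to the parent config, e.g. `parentConfig.addFeature("my_feature", new MyFeatureConfig());`, and the parent's destructor deletes it.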
|
========================================================================================================================================
SOURCE CODE FILE: ActivityProfilerInterface.h
LINES: 1
SIZE: 3.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\ActivityProfilerInterface.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <memory>
#include <set>
#include <thread>
#include <vector>
#include "ActivityTraceInterface.h"
#include "ActivityType.h"
#include "IActivityProfiler.h"
namespace libkineto {
class ActivityProfilerController;
struct CpuTraceBuffer;
class Config;
class ActivityProfilerInterface {
public:
virtual ~ActivityProfilerInterface() {}
virtual void init() {}
virtual bool isInitialized() {
return false;
}
virtual bool isActive() {
return false;
}
// *** Asynchronous API ***
// Instead of starting and stopping the trace manually, provide a start time
// and duration and / or iteration stop criterion.
// Tracing terminates when either condition is met.
virtual void scheduleTrace(const std::string& configStr) {}
// *** Synchronous API ***
// These must be called in order:
// prepareTrace -> startTrace -> stopTrace.
// Many tracing structures are lazily initialized during trace collection,
// with potentially high overhead.
// Call prepareTrace to enable tracing, then run the region to trace
// at least once (and ideally run the same code that is to be traced) to
// allow tracing structures to be initialized.
virtual void prepareTrace(
const std::set<ActivityType>& activityTypes,
const std::string& configStr = "") {}
// Toggle GPU tracing as a trace is running to omit certain parts of a graph
virtual void toggleCollectionDynamic(const bool enable) {}
// Start recording, potentially reusing any buffers allocated since
// prepareTrace was called.
virtual void startTrace() {}
// Stop and process trace, producing an in-memory list of trace records.
// The processing will be done synchronously (using the calling thread).
virtual std::unique_ptr<ActivityTraceInterface> stopTrace() {
return nullptr;
}
// Re-evaluate internal state to allow triggering operations based on the
// number of iterations. Each call implicitly increments the iteration count.
virtual void step() {}
// *** TraceActivity API ***
// FIXME: Pass activityProfiler interface into clientInterface?
virtual void pushCorrelationId(uint64_t id) {}
virtual void popCorrelationId() {}
virtual void transferCpuTrace(std::unique_ptr<CpuTraceBuffer> traceBuffer) {}
// Correlation ids for user defined spans
virtual void pushUserCorrelationId(uint64_t) {}
virtual void popUserCorrelationId() {}
// Saves information for the current thread to be used in profiler output
// Client must record any new kernel thread where the activity has occurred.
virtual void recordThreadInfo() {}
// Record trace metadata, currently supporting only string key and values,
// values with the same key are overwritten
virtual void addMetadata(
const std::string& key,
const std::string& value) = 0;
// Add a child activity profiler, this enables frameworks in the application
// to enable custom framework events.
virtual void addChildActivityProfiler(
std::unique_ptr<IActivityProfiler> profiler) {}
// Log an invariant violation to the enabled logger factories. This helps
// record instances when the profiler behaves unexpectedly.
virtual void logInvariantViolation(
const std::string&,
const std::string&,
const std::string&,
const std::string& = "") {}
};
} // namespace libkineto
```
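The required call order for the synchronous API reads naturally as code. A sketch, assuming the caller has already obtained a concrete `ActivityProfilerInterface` implementation from somewhere (the output file name is illustrative):
```cpp
#include <set>
#include "ActivityProfilerInterface.h"

void traceOnce(libkineto::ActivityProfilerInterface& profiler) {
  const std::set<libkineto::ActivityType> types{
      libkineto::ActivityType::CPU_OP,
      libkineto::ActivityType::CONCURRENT_KERNEL};
  profiler.prepareTrace(types);  // enable tracing, let lazy structures warm up
  // ... run the region to be traced once as a warm-up ...
  profiler.startTrace();
  // ... run the workload to be recorded ...
  auto trace = profiler.stopTrace();  // processed synchronously on this thread
  if (trace) {
    trace->save("trace.json");  // ActivityTraceInterface::save
  }
}
```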
|
=====================================================================================================================================
SOURCE CODE FILE: ActivityTraceInterface.h
LINES: 1
SIZE: 0.60 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\ActivityTraceInterface.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <memory>
#include <string>
#include <vector>
namespace libkineto {
struct ITraceActivity;
class ActivityTraceInterface {
public:
virtual ~ActivityTraceInterface() {}
virtual const std::vector<const ITraceActivity*>* activities() {
return nullptr;
}
virtual void save(const std::string& path) {}
};
} // namespace libkineto
```
|
===========================================================================================================================
SOURCE CODE FILE: ActivityType.h
LINES: 1
SIZE: 2.30 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\ActivityType.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <array>
#include <set>
#include <string>
namespace libkineto {
// Note: Not all activity types are enabled by default. Please add new types
// at the correct position in the enum.
enum class ActivityType {
// Activity types enabled by default
CPU_OP = 0, // cpu side ops
USER_ANNOTATION,
GPU_USER_ANNOTATION,
GPU_MEMCPY,
GPU_MEMSET,
CONCURRENT_KERNEL, // on-device kernels
EXTERNAL_CORRELATION,
CUDA_RUNTIME, // host side cuda runtime events
CUDA_DRIVER, // host side cuda driver events
CPU_INSTANT_EVENT, // host side point-like events
PYTHON_FUNCTION,
OVERHEAD, // CUPTI induced overhead events sampled from its overhead API.
MTIA_RUNTIME, // host side MTIA runtime events
MTIA_CCP_EVENTS, // MTIA ondevice CCP events
CUDA_SYNC, // synchronization events between runtime and kernels
// Optional Activity types
GLOW_RUNTIME, // host side glow runtime events
CUDA_PROFILER_RANGE, // CUPTI Profiler range for performance metrics
HPU_OP, // HPU host side runtime event
XPU_RUNTIME, // host side xpu runtime events
COLLECTIVE_COMM, // collective communication
MTIA_WORKLOADD, // MTIA workloadd events
// PRIVATEUSE1 Activity types are used for custom backends.
// The corresponding device type is `DeviceType::PrivateUse1` in PyTorch.
PRIVATEUSE1_RUNTIME, // host side privateUse1 runtime events
PRIVATEUSE1_DRIVER, // host side privateUse1 driver events
ENUM_COUNT, // This is a buffer entry, not used for any profiling logic.
// Add your new type before it.
OPTIONAL_ACTIVITY_TYPE_START = GLOW_RUNTIME,
};
const char* toString(ActivityType t);
ActivityType toActivityType(const std::string& str);
// Return an array of all activity types except ENUM_COUNT
constexpr int activityTypeCount = (int)ActivityType::ENUM_COUNT;
constexpr int defaultActivityTypeCount =
(int)ActivityType::OPTIONAL_ACTIVITY_TYPE_START;
const std::array<ActivityType, activityTypeCount> activityTypes();
const std::array<ActivityType, defaultActivityTypeCount> defaultActivityTypes();
} // namespace libkineto
```
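For instance, the default-on set that a caller might pass to a profiler's `prepareTrace` can be built from `defaultActivityTypes()`; a sketch using only the declarations above:
```cpp
#include <set>
#include "ActivityType.h"

// Collect the activity types that are enabled by default into a std::set.
std::set<libkineto::ActivityType> defaultActivitySet() {
  std::set<libkineto::ActivityType> result;
  for (libkineto::ActivityType t : libkineto::defaultActivityTypes()) {
    result.insert(t);
  }
  return result;
}
```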
|
==============================================================================================================================
SOURCE CODE FILE: ClientInterface.h
LINES: 1
SIZE: 0.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\ClientInterface.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
namespace libkineto {
class ClientInterface {
public:
virtual ~ClientInterface() {}
virtual void init() = 0;
virtual void prepare(bool, bool, bool, bool, bool) = 0;
virtual void start() = 0;
virtual void stop() = 0;
};
} // namespace libkineto
```
|
=====================================================================================================================
SOURCE CODE FILE: Config.h
LINES: 1
SIZE: 15.20 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\Config.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include "AbstractConfig.h"
#include "ActivityType.h"
#include <assert.h>
#include <chrono>
#include <functional>
#include <set>
#include <string>
#include <vector>
namespace libkineto {
class Config : public AbstractConfig {
public:
Config();
Config& operator=(const Config&) = delete;
Config(Config&&) = delete;
Config& operator=(Config&&) = delete;
// Return a full copy including feature config object
std::unique_ptr<Config> clone() const {
auto cfg = std::unique_ptr<Config>(new Config(*this));
cloneFeaturesInto(*cfg);
return cfg;
}
bool handleOption(const std::string& name, std::string& val) override;
void setClientDefaults() override;
// Log events to this file
const std::string& eventLogFile() const {
return eventLogFile_;
}
bool activityProfilerEnabled() const {
return activityProfilerEnabled_ ||
activitiesOnDemandTimestamp_.time_since_epoch().count() > 0;
}
// Log activity trace to this file
const std::string& activitiesLogFile() const {
return activitiesLogFile_;
}
// Log activity trace to this URL
const std::string& activitiesLogUrl() const {
return activitiesLogUrl_;
}
void setActivitiesLogUrl(const std::string& url) {
activitiesLogUrl_ = url;
}
bool activitiesLogToMemory() const {
return activitiesLogToMemory_;
}
bool eventProfilerEnabled() const {
return !eventNames_.empty() || !metricNames_.empty();
}
// Is profiling enabled for the given device?
bool eventProfilerEnabledForDevice(uint32_t dev) const {
return 0 != (eventProfilerDeviceMask_ & (1 << dev));
}
// Take a sample (read hardware counters) at this frequency.
// This controls how often counters are read - if all counters cannot
// be collected simultaneously then multiple samples are needed to
// collect all requested counters - see multiplex period.
std::chrono::milliseconds samplePeriod() const {
return samplePeriod_;
}
void setSamplePeriod(std::chrono::milliseconds period) {
samplePeriod_ = period;
}
// When all requested counters cannot be collected simultaneously,
// counters will be multiplexed at this frequency.
// Multiplexing can have a large performance impact if done frequently.
// To avoid a perf impact, keep this at 1s or above.
std::chrono::milliseconds multiplexPeriod() const {
return multiplexPeriod_;
}
void setMultiplexPeriod(std::chrono::milliseconds period) {
multiplexPeriod_ = period;
}
// Report counters at this frequency. Note that several samples can
// be reported each time, see samplesPerReport.
std::chrono::milliseconds reportPeriod() const {
return reportPeriod_;
}
void setReportPeriod(std::chrono::milliseconds msecs);
// Number of samples dispatched each report period.
// Must be in the range [1, report period / sample period].
// In other words, aggregation is supported but not interpolation.
int samplesPerReport() const {
return samplesPerReport_;
}
void setSamplesPerReport(int count) {
samplesPerReport_ = count;
}
// The names of events to collect
const std::set<std::string>& eventNames() const {
return eventNames_;
}
// Add additional events to be profiled
void addEvents(const std::set<std::string>& names) {
eventNames_.insert(names.begin(), names.end());
}
// The names of metrics to collect
const std::set<std::string>& metricNames() const {
return metricNames_;
}
// Add additional metrics to be profiled
void addMetrics(const std::set<std::string>& names) {
metricNames_.insert(names.begin(), names.end());
}
const std::vector<int>& percentiles() const {
return eventReportPercentiles_;
}
// Profile for this long, then revert to base config
std::chrono::seconds eventProfilerOnDemandDuration() const {
return eventProfilerOnDemandDuration_;
}
void setEventProfilerOnDemandDuration(std::chrono::seconds duration) {
eventProfilerOnDemandDuration_ = duration;
}
// Too many event profilers on a single system can overload the driver.
// At some point, latencies shoot through the roof and collection of samples
// becomes impossible. To avoid this situation, we limit the number of
// profilers per GPU.
// NOTE: Communication with a daemon is needed for this feature.
// Library must be built with an active DaemonConfigLoader.
int maxEventProfilersPerGpu() const {
return eventProfilerMaxInstancesPerGpu_;
}
// On CUDA 11 we've seen occasional hangs when reprogramming counters.
// Monitor profiling threads and report when a thread is not responding
// for a given number of seconds.
// A period of 0 means disable.
std::chrono::seconds eventProfilerHeartbeatMonitorPeriod() const {
return eventProfilerHeartbeatMonitorPeriod_;
}
// The types of activities selected in the configuration file
const std::set<ActivityType>& selectedActivityTypes() const {
return selectedActivityTypes_;
}
// Set the types of activities to be traced
bool perThreadBufferEnabled() const {
return perThreadBufferEnabled_;
}
void setSelectedActivityTypes(const std::set<ActivityType>& types) {
selectedActivityTypes_ = types;
}
bool isReportInputShapesEnabled() const {
return enableReportInputShapes_;
}
bool isProfileMemoryEnabled() const {
return enableProfileMemory_;
}
bool isWithStackEnabled() const {
return enableWithStack_;
}
bool isWithFlopsEnabled() const {
return enableWithFlops_;
}
bool isWithModulesEnabled() const {
return enableWithModules_;
}
// Trace for this long
std::chrono::milliseconds activitiesDuration() const {
return activitiesDuration_;
}
// Trace for this many iterations, determined by external API
int activitiesRunIterations() const {
return activitiesRunIterations_;
}
int activitiesMaxGpuBufferSize() const {
return activitiesMaxGpuBufferSize_;
}
std::chrono::seconds activitiesWarmupDuration() const {
return activitiesWarmupDuration_;
}
int activitiesWarmupIterations() const {
return activitiesWarmupIterations_;
}
// Show CUDA Synchronization Stream Wait Events
bool activitiesCudaSyncWaitEvents() const {
return activitiesCudaSyncWaitEvents_;
}
void setActivitiesCudaSyncWaitEvents(bool enable) {
activitiesCudaSyncWaitEvents_ = enable;
}
// Timestamp at which profiling should start, as requested by the user.
const std::chrono::time_point<std::chrono::system_clock> requestTimestamp()
const {
if (profileStartTime_.time_since_epoch().count()) {
return profileStartTime_;
}
// If no timestamp was requested, return 0.
if (requestTimestamp_.time_since_epoch().count() == 0) {
return requestTimestamp_;
}
// TODO(T94634890): Deprecate requestTimestamp
return requestTimestamp_ + maxRequestAge() + activitiesWarmupDuration();
}
bool hasProfileStartTime() const {
return requestTimestamp_.time_since_epoch().count() > 0 ||
profileStartTime_.time_since_epoch().count() > 0;
}
int profileStartIteration() const {
return profileStartIteration_;
}
bool hasProfileStartIteration() const {
return profileStartIteration_ >= 0 && activitiesRunIterations_ > 0;
}
void setProfileStartIteration(int iter) {
profileStartIteration_ = iter;
}
int profileStartIterationRoundUp() const {
return profileStartIterationRoundUp_;
}
// calculate the start iteration accounting for warmup
int startIterationIncludingWarmup() const {
if (!hasProfileStartIteration()) {
return -1;
}
return profileStartIteration_ - activitiesWarmupIterations_;
}
const std::chrono::seconds maxRequestAge() const;
// All VLOG* macros will log if the verbose log level is >=
// the verbosity specified for the verbose log message.
// Default value is -1, so messages with log level 0 will log by default.
int verboseLogLevel() const {
return verboseLogLevel_;
}
// Modules for which verbose logging is enabled.
// If empty, logging is enabled for all modules.
const std::vector<std::string>& verboseLogModules() const {
return verboseLogModules_;
}
bool sigUsr2Enabled() const {
return enableSigUsr2_;
}
bool ipcFabricEnabled() const {
return enableIpcFabric_;
}
std::chrono::seconds onDemandConfigUpdateIntervalSecs() const {
return onDemandConfigUpdateIntervalSecs_;
}
static std::chrono::milliseconds alignUp(
std::chrono::milliseconds duration,
std::chrono::milliseconds alignment) {
duration += alignment;
return duration - (duration % alignment);
}
std::chrono::time_point<std::chrono::system_clock>
eventProfilerOnDemandStartTime() const {
return eventProfilerOnDemandTimestamp_;
}
std::chrono::time_point<std::chrono::system_clock>
eventProfilerOnDemandEndTime() const {
return eventProfilerOnDemandTimestamp_ + eventProfilerOnDemandDuration_;
}
std::chrono::time_point<std::chrono::system_clock>
activityProfilerRequestReceivedTime() const {
return activitiesOnDemandTimestamp_;
}
static constexpr std::chrono::milliseconds kControllerIntervalMsecs{1000};
// Users may request and set trace id and group trace id.
const std::string& requestTraceID() const {
return requestTraceID_;
}
void setRequestTraceID(const std::string& tid) {
requestTraceID_ = tid;
}
const std::string& requestGroupTraceID() const {
return requestGroupTraceID_;
}
void setRequestGroupTraceID(const std::string& gtid) {
requestGroupTraceID_ = gtid;
}
size_t cuptiDeviceBufferSize() const {
return cuptiDeviceBufferSize_;
}
size_t cuptiDeviceBufferPoolLimit() const {
return cuptiDeviceBufferPoolLimit_;
}
void updateActivityProfilerRequestReceivedTime();
void printActivityProfilerConfig(std::ostream& s) const override;
void setActivityDependentConfig() override;
void validate(const std::chrono::time_point<std::chrono::system_clock>&
fallbackProfileStartTime) override;
static void addConfigFactory(
std::string name,
std::function<AbstractConfig*(Config&)> factory);
void print(std::ostream& s) const;
// Config relies on some state with global static lifetime. If other
// threads are using the config, it's possible that the global state
// is destroyed before the threads stop. By hanging onto this handle,
// correct destruction order can be ensured.
static std::shared_ptr<void> getStaticObjectsLifetimeHandle();
bool getTSCTimestampFlag() const {
return useTSCTimestamp_;
}
void setTSCTimestampFlag(bool flag) {
useTSCTimestamp_ = flag;
}
private:
explicit Config(const Config& other) = default;
AbstractConfig* cloneDerived(AbstractConfig& parent) const override {
// Clone from AbstractConfig not supported
assert(false);
return nullptr;
}
uint8_t createDeviceMask(const std::string& val);
// Adds valid activity types from the user defined string list in the
// configuration file
void setActivityTypes(const std::vector<std::string>& selected_activities);
// Sets the default activity types to be traced
void selectDefaultActivityTypes() {
// If the user has not specified an activity list, add all default types
for (ActivityType t : defaultActivityTypes()) {
selectedActivityTypes_.insert(t);
}
}
int verboseLogLevel_;
std::vector<std::string> verboseLogModules_;
// Event profiler
// These settings are also supported in on-demand mode
std::chrono::milliseconds samplePeriod_;
std::chrono::milliseconds reportPeriod_;
int samplesPerReport_;
std::set<std::string> eventNames_;
std::set<std::string> metricNames_;
// On-demand duration
std::chrono::seconds eventProfilerOnDemandDuration_;
// Last on-demand request
std::chrono::time_point<std::chrono::system_clock>
eventProfilerOnDemandTimestamp_;
int eventProfilerMaxInstancesPerGpu_;
// Monitor whether event profiler threads are stuck
// at this frequency
std::chrono::seconds eventProfilerHeartbeatMonitorPeriod_;
// These settings can not be changed on-demand
std::string eventLogFile_;
std::vector<int> eventReportPercentiles_ = {5, 25, 50, 75, 95};
uint8_t eventProfilerDeviceMask_ = ~0;
std::chrono::milliseconds multiplexPeriod_;
// Activity profiler
bool activityProfilerEnabled_;
// Enable per-thread buffer
bool perThreadBufferEnabled_;
std::set<ActivityType> selectedActivityTypes_;
// The activity profiler settings are all on-demand
std::string activitiesLogFile_;
std::string activitiesLogUrl_;
// Log activities to memory buffer
bool activitiesLogToMemory_{false};
int activitiesMaxGpuBufferSize_;
std::chrono::seconds activitiesWarmupDuration_;
int activitiesWarmupIterations_;
bool activitiesCudaSyncWaitEvents_;
// Enable Profiler Config Options
// Temporarily disable shape collection until we re-roll out the feature for
// on-demand cases
bool enableReportInputShapes_{false};
bool enableProfileMemory_{false};
bool enableWithStack_{false};
bool enableWithFlops_{false};
bool enableWithModules_{false};
// Profile for specified iterations and duration
std::chrono::milliseconds activitiesDuration_;
int activitiesRunIterations_;
// Below are not used
// Use this net name for iteration count
std::string activitiesExternalAPIIterationsTarget_;
// Only profile nets that includes this in the name
std::vector<std::string> activitiesExternalAPIFilter_;
// Only profile nets with at least this many operators
int activitiesExternalAPINetSizeThreshold_;
// Only profile nets with at least this many GPU operators
int activitiesExternalAPIGpuOpCountThreshold_;
// Last activity profiler request
std::chrono::time_point<std::chrono::system_clock>
activitiesOnDemandTimestamp_;
// ActivityProfilers are triggered by either:
// Synchronized start timestamps
std::chrono::time_point<std::chrono::system_clock> profileStartTime_;
// Or start iterations.
int profileStartIteration_;
int profileStartIterationRoundUp_;
// DEPRECATED
std::chrono::time_point<std::chrono::system_clock> requestTimestamp_;
// Enable profiling via SIGUSR2
bool enableSigUsr2_;
// Enable IPC Fabric instead of thrift communication
bool enableIpcFabric_;
std::chrono::seconds onDemandConfigUpdateIntervalSecs_;
// Logger Metadata
std::string requestTraceID_;
std::string requestGroupTraceID_;
// CUPTI Device Buffer
size_t cuptiDeviceBufferSize_;
size_t cuptiDeviceBufferPoolLimit_;
// CUPTI Timestamp Format
bool useTSCTimestamp_{true};
};
constexpr char kUseDaemonEnvVar[] = "KINETO_USE_DAEMON";
bool isDaemonEnvVarSet();
} // namespace libkineto
```
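One detail worth calling out in `alignUp`: because the alignment is added before rounding down, an exact multiple is advanced by one full step rather than returned unchanged. A quick check against the body shown above:
```cpp
#include <chrono>
#include <cstdio>
#include "Config.h"

int main() {
  using std::chrono::milliseconds;
  const auto a = libkineto::Config::alignUp(milliseconds(1500), milliseconds(1000));
  const auto b = libkineto::Config::alignUp(milliseconds(2000), milliseconds(1000));
  // a == 2000ms (rounded up); b == 3000ms (exact multiple bumped a full step)
  std::printf("%lld %lld\n", static_cast<long long>(a.count()),
              static_cast<long long>(b.count()));
  return 0;
}
```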
|
===================================================================================================================================
SOURCE CODE FILE: GenericTraceActivity.h
LINES: 1
SIZE: 3.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\GenericTraceActivity.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <fmt/format.h>
#include <sstream>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>
#include "ITraceActivity.h"
#include "ThreadUtil.h"
#include "TraceSpan.h"
namespace libkineto {
// Link type, used in GenericTraceActivity.flow.type
constexpr unsigned int kLinkFwdBwd = 1;
constexpr unsigned int kLinkAsyncCpuGpu = 2;
// @lint-ignore-every CLANGTIDY
// cppcoreguidelines-non-private-member-variables-in-classes
// @lint-ignore-every CLANGTIDY cppcoreguidelines-pro-type-member-init
class GenericTraceActivity : public ITraceActivity {
public:
GenericTraceActivity()
: activityType(ActivityType::ENUM_COUNT), traceSpan_(nullptr) {}
GenericTraceActivity(
const TraceSpan& trace,
ActivityType type,
const std::string& name)
: activityType(type), activityName(name), traceSpan_(&trace) {}
int64_t deviceId() const override {
return device;
}
int64_t resourceId() const override {
return resource;
}
int32_t getThreadId() const override {
return threadId;
}
int64_t timestamp() const override {
return startTime;
}
int64_t duration() const override {
return endTime - startTime;
}
int64_t correlationId() const override {
return id;
}
ActivityType type() const override {
return activityType;
}
const ITraceActivity* linkedActivity() const override {
return linked;
}
int flowType() const override {
return flow.type;
}
int64_t flowId() const override {
return flow.id;
}
bool flowStart() const override {
return flow.start;
}
const std::string name() const override {
return activityName;
}
const TraceSpan* traceSpan() const override {
return traceSpan_;
}
void log(ActivityLogger& logger) const override;
// Encode client side metadata as a key/value
template <typename ValType>
void addMetadata(const std::string& key, const ValType& value) {
metadataMap_.emplace(key, std::make_pair(fmt::format("{}", value), false));
}
void addMetadataQuoted(const std::string& key, const std::string& value) {
metadataMap_.emplace(key, std::make_pair(value, true));
}
const std::string getMetadataValue(const std::string& key) const override {
if (auto it = metadataMap_.find(key); it != metadataMap_.end()) {
return it->second.first;
}
return "";
}
const std::string metadataJson() const override {
std::stringstream json;
bool first = true;
for (const auto& [key, val] : metadataMap_) {
if (!first) {
json << ", ";
}
val.second ? json << fmt::format("\"{}\": \"{}\"", key, val.first)
: json << fmt::format("\"{}\": {}", key, val.first);
first = false;
}
return json.str();
}
virtual ~GenericTraceActivity() override {}
int64_t startTime{0};
int64_t endTime{0};
int32_t id{0};
int32_t device{0};
int32_t resource{0};
int32_t threadId{0};
ActivityType activityType;
std::string activityName;
struct Flow {
Flow() : id(0), type(0), start(0) {}
// Ids must be unique within each type
uint32_t id : 27;
// Type will be used to connect flows between profilers, as
// well as look up flow information (name etc)
uint32_t type : 4;
uint32_t start : 1;
} flow;
const ITraceActivity* linked{nullptr};
private:
const TraceSpan* traceSpan_;
// Metadata map: { key: (value, quoted)}
std::unordered_map<std::string, std::pair<std::string, bool>> metadataMap_;
};
} // namespace libkineto
```
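A small sketch of populating an activity by hand (it links against libkineto for the out-of-line `log()` definition). Note that `metadataJson()` emits the key/value pairs without surrounding braces, and that unordered_map iteration order is unspecified:
```cpp
#include <cstdio>
#include "GenericTraceActivity.h"

int main() {
  libkineto::GenericTraceActivity act;  // default ctor: no trace span
  act.activityType = libkineto::ActivityType::CPU_OP;
  act.activityName = "my_op";
  act.startTime = 1000;  // ns
  act.endTime = 2500;    // ns
  act.addMetadata("bytes", 4096);           // emitted unquoted: "bytes": 4096
  act.addMetadataQuoted("note", "warmup");  // emitted quoted: "note": "warmup"
  std::printf("duration=%lld ns, metadata: %s\n",
              static_cast<long long>(act.duration()),
              act.metadataJson().c_str());
  return 0;
}
```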
|
================================================================================================================================
SOURCE CODE FILE: IActivityProfiler.h
LINES: 1
SIZE: 5.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\IActivityProfiler.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <memory>
#include <set>
#include <vector>
#include "Config.h"
#include "GenericTraceActivity.h"
/* This file includes an abstract base class for an activity profiler
* that can be implemented by multiple tracing agents in the application.
* The high level Kineto profiler can co-ordinate start and end of tracing
* and combine together events from multiple such activity profilers.
*/
namespace libkineto {
struct CpuTraceBuffer;
#ifdef _MSC_VER
// workaround for the predefined ERROR macro on Windows
#undef ERROR
#endif // _MSC_VER
enum class TraceStatus {
READY, // Accepting trace requests
WARMUP, // Performing trace warmup
RECORDING, // Actively collecting activities
PROCESSING, // Recording is complete, preparing results
ERROR, // One or more errors (and possibly also warnings) occurred.
WARNING, // One or more warnings occurred.
};
/* DeviceInfo:
* Can be used to specify process name, sort order, PID and device label.
* The sort order is determined by the sortIndex field to handle ordering of
* processes and gpu rows in the trace viewer.
*/
struct DeviceInfo {
DeviceInfo(
int64_t id,
int64_t sortIndex,
const std::string& name,
const std::string& label)
: id(id), sortIndex(sortIndex), name(name), label(label) {}
int64_t id; // process id
int64_t sortIndex; // position in trace view
const std::string name; // process name
const std::string label; // device label
};
/* ResourceInfo:
* Can be used to specify resource inside device
*/
struct ResourceInfo {
ResourceInfo(
int64_t deviceId,
int64_t id,
int64_t sortIndex,
const std::string& name)
: id(id), sortIndex(sortIndex), deviceId(deviceId), name(name) {}
int64_t id; // resource id
int64_t sortIndex; // position in trace view
int64_t deviceId; // id of device which owns this resource (specified in
// DeviceInfo.id)
const std::string name; // resource name
};
using getLinkedActivityCallback = std::function<const ITraceActivity*(int32_t)>;
/* IActivityProfilerSession:
* an opaque object that can be used by a high level profiler to
* start/stop and return trace events.
*/
class IActivityProfilerSession {
public:
virtual ~IActivityProfilerSession() {}
// start the trace collection synchronously
virtual void start() = 0;
// stop the trace collection synchronously
virtual void stop() = 0;
TraceStatus status() {
return status_;
}
// returns errors with this trace
virtual std::vector<std::string> errors() = 0;
// processes trace activities using logger
virtual void processTrace(ActivityLogger& logger) = 0;
virtual void processTrace(
ActivityLogger& logger,
getLinkedActivityCallback /*getLinkedActivity*/,
int64_t /*startTime*/,
int64_t /*endTime*/) {
processTrace(logger);
}
// returns device info used in this trace, could be nullptr
virtual std::unique_ptr<DeviceInfo> getDeviceInfo() = 0;
// returns resource info used in this trace, could be empty
virtual std::vector<ResourceInfo> getResourceInfos() = 0;
// release ownership of the trace events and metadata
virtual std::unique_ptr<CpuTraceBuffer> getTraceBuffer() = 0;
// XXX define trace formats
// virtual save(string name, TraceFormat format)
virtual void pushCorrelationId(uint64_t /*id*/) {}
virtual void popCorrelationId() {}
virtual void pushUserCorrelationId(uint64_t /*id*/) {}
virtual void popUserCorrelationId() {}
virtual std::string getDeviceProperties() {
return "";
}
protected:
TraceStatus status_ = TraceStatus::READY;
};
/* Activity Profiler Plugins:
* These allow other frameworks to integrate into Kineto's primary
* activity profiler. While the primary activity profiler handles
* timing the trace collections and correlating events, the plugins
* can become sources of new trace activity types.
*/
class IActivityProfiler {
public:
virtual ~IActivityProfiler() {}
// name of profiler
virtual const std::string& name() const = 0;
// returns activity types this profiler supports
virtual const std::set<ActivityType>& availableActivities() const = 0;
// Calls prepare() on registered tracer providers passing in the relevant
// activity types. Returns a profiler session handle
virtual std::unique_ptr<IActivityProfilerSession> configure(
const std::set<ActivityType>& activity_types,
const Config& config) = 0;
// asynchronous version of the above with future timestamp and duration.
virtual std::unique_ptr<IActivityProfilerSession> configure(
int64_t ts_ms,
int64_t duration_ms,
const std::set<ActivityType>& activity_types,
const Config& config) = 0;
};
} // namespace libkineto
```
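A skeletal plugin, to make the session lifecycle concrete: it records nothing but walks the `TraceStatus` state machine and satisfies every pure virtual. All names here are invented; a real plugin would fill and hand back a trace buffer:
```cpp
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "IActivityProfiler.h"

namespace {
using namespace libkineto;

class NoopSession : public IActivityProfilerSession {
 public:
  void start() override { status_ = TraceStatus::RECORDING; }
  void stop() override { status_ = TraceStatus::PROCESSING; }
  std::vector<std::string> errors() override { return {}; }
  void processTrace(ActivityLogger& /*logger*/) override {
    status_ = TraceStatus::READY;  // nothing was recorded, nothing to log
  }
  std::unique_ptr<DeviceInfo> getDeviceInfo() override { return nullptr; }
  std::vector<ResourceInfo> getResourceInfos() override { return {}; }
  std::unique_ptr<CpuTraceBuffer> getTraceBuffer() override { return nullptr; }
};

class NoopProfiler : public IActivityProfiler {
 public:
  const std::string& name() const override {
    static const std::string kName{"noop_profiler"};
    return kName;
  }
  const std::set<ActivityType>& availableActivities() const override {
    static const std::set<ActivityType> kTypes{ActivityType::CPU_OP};
    return kTypes;
  }
  std::unique_ptr<IActivityProfilerSession> configure(
      const std::set<ActivityType>&, const Config&) override {
    return std::make_unique<NoopSession>();
  }
  std::unique_ptr<IActivityProfilerSession> configure(
      int64_t /*ts_ms*/, int64_t /*duration_ms*/,
      const std::set<ActivityType>& types, const Config& config) override {
    return configure(types, config);  // this sketch ignores scheduling
  }
};
}  // namespace
```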
|
==============================================================================================================================
SOURCE CODE FILE: ILoggerObserver.h
LINES: 1
SIZE: 1.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\ILoggerObserver.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <string>
// Stages in libkineto used when pushing logs to UST Logger.
constexpr char kWarmUpStage[] = "Warm Up";
constexpr char kCollectionStage[] = "Collection";
constexpr char kPostProcessingStage[] = "Post Processing";
// Special string in UST for determining if traces are empty
constexpr char kEmptyTrace[] =
"No Valid Trace Events (CPU/GPU) found. Outputting empty trace.";
#if !USE_GOOGLE_LOG
#include <map>
#include <vector>
#include <stdint.h>
namespace libkineto {
enum LoggerOutputType {
VERBOSE = 0,
INFO = 1,
WARNING = 2,
STAGE = 3,
ERROR = 4,
ENUM_COUNT = 5
};
const char* toString(LoggerOutputType t);
LoggerOutputType toLoggerOutputType(const std::string& str);
constexpr int LoggerTypeCount = (int)LoggerOutputType::ENUM_COUNT;
class ILoggerObserver {
public:
virtual ~ILoggerObserver() = default;
virtual void write(const std::string& message, LoggerOutputType ot) = 0;
virtual const std::map<LoggerOutputType, std::vector<std::string>>
extractCollectorMetadata() = 0;
virtual void reset() = 0;
virtual void addDevice(const int64_t device) = 0;
virtual void setTraceDurationMS(const int64_t duration) = 0;
virtual void addEventCount(const int64_t count) = 0;
virtual void setTraceID(const std::string&) {}
virtual void setGroupTraceID(const std::string&) {}
virtual void addDestination(const std::string& dest) = 0;
virtual void setTriggerOnDemand() {}
virtual void addMetadata(
const std::string& key,
const std::string& value) = 0;
};
} // namespace libkineto
#endif // !USE_GOOGLE_LOG
```
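A minimal sketch of an observer that buffers messages per output type follows; the class name is illustrative, thread safety is omitted, and like the interface itself it only compiles when `USE_GOOGLE_LOG` is off.

```cpp
#include <map>
#include <string>
#include <vector>
#include "ILoggerObserver.h"

using namespace libkineto;

// Hypothetical observer: buffers every message, keyed by output type.
class BufferingLoggerObserver : public ILoggerObserver {
 public:
  void write(const std::string& message, LoggerOutputType ot) override {
    messages_[ot].push_back(message);
  }
  const std::map<LoggerOutputType, std::vector<std::string>>
  extractCollectorMetadata() override {
    return messages_;
  }
  void reset() override { messages_.clear(); }
  // The remaining hooks are accepted but ignored in this sketch.
  void addDevice(const int64_t /*device*/) override {}
  void setTraceDurationMS(const int64_t /*duration*/) override {}
  void addEventCount(const int64_t /*count*/) override {}
  void addDestination(const std::string& /*dest*/) override {}
  void addMetadata(const std::string& /*key*/, const std::string& /*value*/)
      override {}

 private:
  std::map<LoggerOutputType, std::vector<std::string>> messages_;
};
```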
|
=============================================================================================================================
SOURCE CODE FILE: ITraceActivity.h
LINES: 1
SIZE: 2.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\ITraceActivity.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <string>
#include "ActivityType.h"
namespace libkineto {
class ActivityLogger;
struct TraceSpan;
// Generic activity interface, borrowed from the TensorBoard protobuf format.
struct ITraceActivity {
virtual ~ITraceActivity() {}
// Device is a physical or logical entity, e.g. CPU, GPU or process
virtual int64_t deviceId() const = 0;
  // A resource is something on the device, e.g. a h/w thread or
  // functional unit.
virtual int64_t resourceId() const = 0;
// s/w thread
virtual int32_t getThreadId() const = 0;
// Start timestamp in nanoseconds
virtual int64_t timestamp() const = 0;
// Duration in nanoseconds
virtual int64_t duration() const = 0;
// Used to link up async activities
virtual int64_t correlationId() const = 0;
// Part of a flow, identified by flow id and type
virtual int flowType() const = 0;
virtual int64_t flowId() const = 0;
virtual bool flowStart() const = 0;
virtual ActivityType type() const = 0;
virtual const std::string name() const = 0;
// Optional linked activity
virtual const ITraceActivity* linkedActivity() const = 0;
// Optional containing trace object
virtual const TraceSpan* traceSpan() const = 0;
// Log activity
virtual void log(ActivityLogger& logger) const = 0;
// Return json formatted metadata
// FIXME: Return iterator to dynamic type map here instead
virtual const std::string metadataJson() const = 0;
// Return the metadata value in string format with key
// @lint-ignore CLANGTIDY: clang-diagnostic-unused-parameter
virtual const std::string getMetadataValue(const std::string& key) const {
return "";
}
static int64_t nsToUs(int64_t ns) {
// It's important that this conversion is the same everywhere.
// No rounding!
return ns / 1000;
}
};
} // namespace libkineto
```
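The truncating behavior of `nsToUs` is worth a concrete check: integer division rounds toward zero, so 1999 ns becomes 1 us, never 2. A quick sanity test:

```cpp
#include <cassert>
#include "ITraceActivity.h"

int main() {
  assert(libkineto::ITraceActivity::nsToUs(1999) == 1);   // truncated, not rounded
  assert(libkineto::ITraceActivity::nsToUs(-1999) == -1); // toward zero
  return 0;
}
```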
|
=========================================================================================================================
SOURCE CODE FILE: LoggingAPI.h
LINES: 1
SIZE: 0.35 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\LoggingAPI.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
namespace libkineto {
int getLogSeverityLevel();
void setLogSeverityLevel(int level);
} // namespace libkineto
```
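A small usage sketch: the meaning of the integer levels is defined by the logging backend rather than by this header, so the code below only assumes that a larger value means a quieter threshold.

```cpp
#include "LoggingAPI.h"

void runNoisySection() {
  const int prev = libkineto::getLogSeverityLevel();
  libkineto::setLogSeverityLevel(prev + 1); // suppress one level of chatter
  // ... noisy work ...
  libkineto::setLogSeverityLevel(prev);     // restore the previous threshold
}
```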
|
=========================================================================================================================
SOURCE CODE FILE: ThreadUtil.h
LINES: 1
SIZE: 0.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\ThreadUtil.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
namespace libkineto {
int32_t systemThreadId(bool cache = true);
int32_t threadId();
bool setThreadName(const std::string& name);
std::string getThreadName();
int32_t processId(bool cache = true);
std::string processName(int32_t pid);
// Return a list of pids and process names for the current process
// and its parents.
std::vector<std::pair<int32_t, std::string>> pidCommandPairsOfAncestors();
// Resets all cached thread-local state; this must be done on
// fork to prevent stale values from being retained.
void resetTLS();
} // namespace libkineto
```
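Typical usage, as a sketch: name the current thread so it is readable in traces, then dump identity and ancestry (the loop needs C++17 for the structured binding).

```cpp
#include <cstdio>
#include "ThreadUtil.h"

int main() {
  libkineto::setThreadName("worker-0");
  std::printf("pid=%d tid=%d sys_tid=%d name=%s\n",
              libkineto::processId(),
              libkineto::threadId(),
              libkineto::systemThreadId(),
              libkineto::getThreadName().c_str());
  for (const auto& [pid, cmd] : libkineto::pidCommandPairsOfAncestors()) {
    std::printf("ancestor pid=%d cmd=%s\n", pid, cmd.c_str());
  }
  return 0;
}
```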
|
========================================================================================================================
SOURCE CODE FILE: TraceSpan.h
LINES: 1
SIZE: 0.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\TraceSpan.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <atomic>
#include <string>
#include <thread>
namespace libkineto {
struct TraceSpan {
TraceSpan() = delete;
TraceSpan(int64_t startTime, int64_t endTime, std::string name)
: startTime(startTime), endTime(endTime), name(std::move(name)) {}
TraceSpan(int opCount, int it, std::string name, std::string prefix)
: opCount(opCount),
iteration(it),
name(std::move(name)),
prefix(std::move(prefix)) {}
// FIXME: change to duration?
int64_t startTime{0};
int64_t endTime{0};
int opCount{0};
int iteration{-1};
  // Name is used to identify the timeline
std::string name;
// Prefix used to distinguish trace spans on the same timeline
std::string prefix;
};
} // namespace libkineto
```
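The two constructors correspond to the two ways spans are described: by wall-clock bounds or by iteration. A short sketch:

```cpp
#include "TraceSpan.h"

void example() {
  // Time-bounded span: explicit start/end timestamps plus a timeline name.
  libkineto::TraceSpan timed{/*startTime=*/1000000, /*endTime=*/2000000, "step"};
  // Iteration span: op count and iteration number; the timestamps keep
  // their zero defaults until filled in.
  libkineto::TraceSpan iter{/*opCount=*/128, /*it=*/3, "optimizer", "iter"};
  (void)timed;
  (void)iter;
}
```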
|
========================================================================================================================
SOURCE CODE FILE: libkineto.h
LINES: 1
SIZE: 3.95 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\libkineto.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// Mediator for initialization and profiler control
#pragma once
#include <atomic>
#include <chrono>
#include <deque>
#include <functional>
#include <memory>
#include <mutex>
#include <set>
#include <string>
#include <thread>
#include <vector>
#include "ActivityProfilerInterface.h"
#include "ActivityTraceInterface.h"
#include "ActivityType.h"
#include "ClientInterface.h"
#include "GenericTraceActivity.h"
#include "IActivityProfiler.h"
#include "ILoggerObserver.h"
#include "LoggingAPI.h"
#include "TraceSpan.h"
#include "ThreadUtil.h"
extern "C" {
void suppressLibkinetoLogMessages();
int InitializeInjection(void);
void libkineto_init(bool cpuOnly, bool logOnError);
bool hasTestEnvVar();
}
namespace libkineto {
class Config;
class ConfigLoader;
struct CpuTraceBuffer {
template <class... Args>
void emplace_activity(Args&&... args) {
activities.emplace_back(
std::make_unique<GenericTraceActivity>(std::forward<Args>(args)...));
}
static GenericTraceActivity& toRef(
std::unique_ptr<GenericTraceActivity>& ref) {
return *ref;
}
static const GenericTraceActivity& toRef(
const std::unique_ptr<GenericTraceActivity>& ref) {
return *ref;
}
TraceSpan span{0, 0, "none"};
int gpuOpCount;
std::deque<std::unique_ptr<GenericTraceActivity>> activities;
};
using ChildActivityProfilerFactory =
std::function<std::unique_ptr<IActivityProfiler>()>;
class LibkinetoApi {
public:
explicit LibkinetoApi(ConfigLoader& configLoader)
: configLoader_(configLoader) {}
  // Called by a client that supports the tracing API.
  // libkineto can still function without this.
void registerClient(ClientInterface* client);
// Called by libkineto on init
void registerProfiler(std::unique_ptr<ActivityProfilerInterface> profiler) {
activityProfiler_ = std::move(profiler);
initClientIfRegistered();
}
ActivityProfilerInterface& activityProfiler() {
return *activityProfiler_;
}
ClientInterface* client() {
return client_;
}
void initProfilerIfRegistered() {
static std::once_flag once;
if (activityProfiler_) {
std::call_once(once, [this] {
if (!activityProfiler_->isInitialized()) {
activityProfiler_->init();
initChildActivityProfilers();
}
});
}
}
bool isProfilerInitialized() const {
return activityProfiler_ && activityProfiler_->isInitialized();
}
bool isProfilerRegistered() const {
return activityProfiler_ != nullptr;
}
void suppressLogMessages() {
suppressLibkinetoLogMessages();
}
void resetKinetoTLS() {
resetTLS();
}
  // Provides access to profiler configuration management
ConfigLoader& configLoader() {
return configLoader_;
}
void registerProfilerFactory(ChildActivityProfilerFactory factory) {
if (isProfilerInitialized()) {
activityProfiler_->addChildActivityProfiler(factory());
} else {
childProfilerFactories_.push_back(factory);
}
}
private:
void initChildActivityProfilers() {
if (!isProfilerInitialized()) {
return;
}
for (const auto& factory : childProfilerFactories_) {
activityProfiler_->addChildActivityProfiler(factory());
}
childProfilerFactories_.clear();
}
  // Client is initialized once both it and libkineto have registered
void initClientIfRegistered();
ConfigLoader& configLoader_;
std::unique_ptr<ActivityProfilerInterface> activityProfiler_{};
ClientInterface* client_{};
int32_t clientRegisterThread_{0};
std::vector<ChildActivityProfilerFactory> childProfilerFactories_;
};
// Singleton
LibkinetoApi& api();
} // namespace libkineto
```
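As a sketch of the registration flow: a framework hands `registerProfilerFactory` a callable producing its plugin (here a hypothetical `MyProfiler` implementing `IActivityProfiler`). If the main profiler is already initialized the child is attached immediately; otherwise the factory is queued until `initProfilerIfRegistered` runs.

```cpp
#include <memory>
#include "libkineto.h"

// MyProfiler is a hypothetical IActivityProfiler implementation.
void attachMyProfiler() {
  libkineto::api().registerProfilerFactory(
      []() -> std::unique_ptr<libkineto::IActivityProfiler> {
        return std::make_unique<MyProfiler>();
      });
}
```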
|
==========================================================================================================================
SOURCE CODE FILE: output_base.h
LINES: 1
SIZE: 2.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\output_base.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <fstream>
#include <map>
#include <ostream>
#include <thread>
#include <unordered_map>
// TODO(T90238193)
// @lint-ignore-every CLANGTIDY facebook-hte-RelativeInclude
#include "GenericTraceActivity.h"
#include "IActivityProfiler.h"
#include "ThreadUtil.h"
#include "TraceSpan.h"
namespace KINETO_NAMESPACE {
struct ActivityBuffers;
}
namespace libkineto {
using namespace KINETO_NAMESPACE;
// Used by sortIndex to put GPU tracks at the bottom
// of the trace timelines. The largest valid CPU PID is 4,194,304,
// so 5,000,000 is enough to guarantee that GPU tracks are sorted after CPU.
constexpr int64_t kExceedMaxPid = 5000000;
class ActivityLogger {
public:
virtual ~ActivityLogger() = default;
struct OverheadInfo {
explicit OverheadInfo(const std::string& name) : name(name) {}
const std::string name;
};
virtual void handleDeviceInfo(const DeviceInfo& info, uint64_t time) = 0;
virtual void handleResourceInfo(const ResourceInfo& info, int64_t time) = 0;
virtual void handleOverheadInfo(const OverheadInfo& info, int64_t time) = 0;
virtual void handleTraceSpan(const TraceSpan& span) = 0;
virtual void handleActivity(const libkineto::ITraceActivity& activity) = 0;
virtual void handleGenericActivity(
const libkineto::GenericTraceActivity& activity) = 0;
virtual void handleTraceStart(
const std::unordered_map<std::string, std::string>& metadata,
const std::string& device_properties) = 0;
void handleTraceStart() {
handleTraceStart(std::unordered_map<std::string, std::string>(), "");
}
virtual void finalizeTrace(
const Config& config,
std::unique_ptr<ActivityBuffers> buffers,
int64_t endTime,
std::unordered_map<std::string, std::vector<std::string>>& metadata) = 0;
protected:
ActivityLogger() = default;
};
} // namespace libkineto
```
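A minimal sketch of a concrete logger that merely counts what it sees is given below, assuming the includes and using-directives of output_base.h above. One wrinkle is worth noting: because `ActivityBuffers` is only forward-declared here, `finalizeTrace` is declared in the class but must be defined in a translation unit where that internal type is complete (a `std::unique_ptr` parameter needs a complete type at its point of destruction).

```cpp
#include <cstddef>
#include "output_base.h"

using namespace libkineto;

// Hypothetical logger: counts spans and activities instead of serializing.
class CountingLogger : public ActivityLogger {
 public:
  void handleDeviceInfo(const DeviceInfo& /*info*/, uint64_t /*time*/) override {}
  void handleResourceInfo(const ResourceInfo& /*info*/, int64_t /*time*/) override {}
  void handleOverheadInfo(const OverheadInfo& /*info*/, int64_t /*time*/) override {}
  void handleTraceSpan(const TraceSpan& /*span*/) override { ++spans_; }
  void handleActivity(const ITraceActivity& /*activity*/) override { ++activities_; }
  void handleGenericActivity(const GenericTraceActivity& /*activity*/) override {
    ++activities_;
  }
  void handleTraceStart(
      const std::unordered_map<std::string, std::string>& /*metadata*/,
      const std::string& /*device_properties*/) override {}
  // Defined out of line, where ActivityBuffers is a complete type.
  void finalizeTrace(
      const Config& config,
      std::unique_ptr<ActivityBuffers> buffers,
      int64_t endTime,
      std::unordered_map<std::string, std::vector<std::string>>& metadata) override;

  std::size_t spans_ = 0;
  std::size_t activities_ = 0;
};
```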
|
===============================================================================================================================
SOURCE CODE FILE: time_since_epoch.h
LINES: 1
SIZE: 0.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\kineto\time_since_epoch.h
ENCODING: utf-8
```h
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <chrono>
namespace libkineto {
template <class ClockT>
inline int64_t timeSinceEpoch(const std::chrono::time_point<ClockT>& t) {
return std::chrono::duration_cast<std::chrono::nanoseconds>(
t.time_since_epoch())
.count();
}
} // namespace libkineto
```
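Usage is one line with any std::chrono clock; the result is integer nanoseconds:

```cpp
#include <chrono>
#include <cstdint>
#include "time_since_epoch.h"

int64_t nowNs() {
  return libkineto::timeSinceEpoch(std::chrono::system_clock::now());
}
```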
|
==============================================================================================================
SOURCE CODE FILE: libshm.h
LINES: 1
SIZE: 0.80 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\libshm.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/MapAllocator.h>
#ifdef __cplusplus
#ifdef SHM_EXPORTS
#define SHM_API __declspec(dllexport)
#else
#define SHM_API __declspec(dllimport)
#endif
SHM_API void libshm_init(const char* manager_exec_path);
class SHM_API THManagedMapAllocator : public at::RefcountedMapAllocator {
public:
THManagedMapAllocator(
const char* manager_handle,
const char* filename,
int flags,
size_t size)
: at::RefcountedMapAllocator(filename, flags, size) {}
static at::DataPtr makeDataPtr(
const char* manager_handle,
const char* filename,
int flags,
size_t size);
static THManagedMapAllocator* fromDataPtr(const at::DataPtr&);
const char* manager_handle() const {
return "no_manager";
}
};
#endif
```
|
=============================================================================================================
SOURCE CODE FILE: psimd.h
LINES: 1
SIZE: 45.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\psimd.h
ENCODING: utf-8
```h
#pragma once
#ifndef PSIMD_H
#define PSIMD_H
#if defined(__CUDA_ARCH__)
/* CUDA compiler */
#define PSIMD_INTRINSIC __forceinline__ __device__
#elif defined(__OPENCL_VERSION__)
/* OpenCL compiler */
#define PSIMD_INTRINSIC inline static
#elif defined(__INTEL_COMPILER)
/* Intel compiler, even on Windows */
#define PSIMD_INTRINSIC inline static __attribute__((__always_inline__))
#elif defined(__GNUC__)
/* GCC-compatible compiler (gcc/clang/icc) */
#define PSIMD_INTRINSIC inline static __attribute__((__always_inline__))
#elif defined(_MSC_VER)
/* MSVC-compatible compiler (cl/icl/clang-cl) */
#define PSIMD_INTRINSIC __forceinline static
#elif defined(__cplusplus)
/* Generic C++ compiler */
#define PSIMD_INTRINSIC inline static
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
/* Generic C99 compiler */
#define PSIMD_INTRINSIC inline static
#else
/* Generic C compiler */
#define PSIMD_INTRINSIC static
#endif
#if defined(__GNUC__) || defined(__clang__)
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
#if defined(__SSE2__)
#include <emmintrin.h>
#endif
#if defined(__SSE3__)
#include <pmmintrin.h>
#endif
#if defined(__SSSE3__)
#include <tmmintrin.h>
#endif
#if defined(__SSE4_1__)
#include <smmintrin.h>
#endif
#if defined(__SSE4_2__)
#include <nmmintrin.h>
#endif
#if defined(__AVX__)
#include <immintrin.h>
#endif
#elif defined(_MSC_VER)
#include <intrin.h>
#endif
#if defined(__cplusplus)
#define PSIMD_CXX_SYNTAX
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
#define PSIMD_C11_SYNTAX
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
#define PSIMD_C99_SYNTAX
#else
#define PSIMD_C89_SYNTAX
#endif
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#include <cstddef>
#include <cstdint>
#elif !defined(__OPENCL_VERSION__)
#include <stddef.h>
#include <stdint.h>
#endif
#if defined(__GNUC__) || defined(__clang__)
#define PSIMD_HAVE_F64 0
#define PSIMD_HAVE_F32 1
#define PSIMD_HAVE_U8 1
#define PSIMD_HAVE_S8 1
#define PSIMD_HAVE_U16 1
#define PSIMD_HAVE_S16 1
#define PSIMD_HAVE_U32 1
#define PSIMD_HAVE_S32 1
#define PSIMD_HAVE_U64 0
#define PSIMD_HAVE_S64 0
typedef int8_t psimd_s8 __attribute__((vector_size(16), aligned(1)));
typedef uint8_t psimd_u8 __attribute__((vector_size(16), aligned(1)));
typedef int16_t psimd_s16 __attribute__((vector_size(16), aligned(2)));
typedef uint16_t psimd_u16 __attribute__((vector_size(16), aligned(2)));
typedef int32_t psimd_s32 __attribute__((vector_size(16), aligned(4)));
typedef uint32_t psimd_u32 __attribute__((vector_size(16), aligned(4)));
typedef float psimd_f32 __attribute__((vector_size(16), aligned(4)));
typedef struct {
psimd_s8 lo;
psimd_s8 hi;
} psimd_s8x2;
typedef struct {
psimd_u8 lo;
psimd_u8 hi;
} psimd_u8x2;
typedef struct {
psimd_s16 lo;
psimd_s16 hi;
} psimd_s16x2;
typedef struct {
psimd_u16 lo;
psimd_u16 hi;
} psimd_u16x2;
typedef struct {
psimd_s32 lo;
psimd_s32 hi;
} psimd_s32x2;
typedef struct {
psimd_u32 lo;
psimd_u32 hi;
} psimd_u32x2;
typedef struct {
psimd_f32 lo;
psimd_f32 hi;
} psimd_f32x2;
/* Bit casts */
PSIMD_INTRINSIC psimd_u32x2 psimd_cast_s32x2_u32x2(psimd_s32x2 v) {
return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi };
}
PSIMD_INTRINSIC psimd_f32x2 psimd_cast_s32x2_f32x2(psimd_s32x2 v) {
return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi };
}
PSIMD_INTRINSIC psimd_s32x2 psimd_cast_u32x2_s32x2(psimd_u32x2 v) {
return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi };
}
PSIMD_INTRINSIC psimd_f32x2 psimd_cast_u32x2_f32x2(psimd_u32x2 v) {
return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi };
}
PSIMD_INTRINSIC psimd_s32x2 psimd_cast_f32x2_s32x2(psimd_f32x2 v) {
return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi };
}
PSIMD_INTRINSIC psimd_u32x2 psimd_cast_f32x2_u32x2(psimd_f32x2 v) {
return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi };
}
/* Swap */
PSIMD_INTRINSIC void psimd_swap_s8(psimd_s8 a[1], psimd_s8 b[1]) {
const psimd_s8 new_a = *b;
const psimd_s8 new_b = *a;
*a = new_a;
*b = new_b;
}
PSIMD_INTRINSIC void psimd_swap_u8(psimd_u8 a[1], psimd_u8 b[1]) {
const psimd_u8 new_a = *b;
const psimd_u8 new_b = *a;
*a = new_a;
*b = new_b;
}
PSIMD_INTRINSIC void psimd_swap_s16(psimd_s16 a[1], psimd_s16 b[1]) {
const psimd_s16 new_a = *b;
const psimd_s16 new_b = *a;
*a = new_a;
*b = new_b;
}
PSIMD_INTRINSIC void psimd_swap_u16(psimd_u16 a[1], psimd_u16 b[1]) {
const psimd_u16 new_a = *b;
const psimd_u16 new_b = *a;
*a = new_a;
*b = new_b;
}
PSIMD_INTRINSIC void psimd_swap_s32(psimd_s32 a[1], psimd_s32 b[1]) {
const psimd_s32 new_a = *b;
const psimd_s32 new_b = *a;
*a = new_a;
*b = new_b;
}
PSIMD_INTRINSIC void psimd_swap_u32(psimd_u32 a[1], psimd_u32 b[1]) {
const psimd_u32 new_a = *b;
const psimd_u32 new_b = *a;
*a = new_a;
*b = new_b;
}
PSIMD_INTRINSIC void psimd_swap_f32(psimd_f32 a[1], psimd_f32 b[1]) {
const psimd_f32 new_a = *b;
const psimd_f32 new_b = *a;
*a = new_a;
*b = new_b;
}
/* Zero-initialization */
PSIMD_INTRINSIC psimd_s8 psimd_zero_s8(void) {
return (psimd_s8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
}
PSIMD_INTRINSIC psimd_u8 psimd_zero_u8(void) {
return (psimd_u8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
}
PSIMD_INTRINSIC psimd_s16 psimd_zero_s16(void) {
return (psimd_s16) { 0, 0, 0, 0, 0, 0, 0, 0 };
}
PSIMD_INTRINSIC psimd_u16 psimd_zero_u16(void) {
return (psimd_u16) { 0, 0, 0, 0, 0, 0, 0, 0 };
}
PSIMD_INTRINSIC psimd_s32 psimd_zero_s32(void) {
return (psimd_s32) { 0, 0, 0, 0 };
}
PSIMD_INTRINSIC psimd_u32 psimd_zero_u32(void) {
return (psimd_u32) { 0, 0, 0, 0 };
}
PSIMD_INTRINSIC psimd_f32 psimd_zero_f32(void) {
return (psimd_f32) { 0.0f, 0.0f, 0.0f, 0.0f };
}
/* Initialization to the same constant */
PSIMD_INTRINSIC psimd_s8 psimd_splat_s8(int8_t c) {
return (psimd_s8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c };
}
PSIMD_INTRINSIC psimd_u8 psimd_splat_u8(uint8_t c) {
return (psimd_u8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c };
}
PSIMD_INTRINSIC psimd_s16 psimd_splat_s16(int16_t c) {
return (psimd_s16) { c, c, c, c, c, c, c, c };
}
PSIMD_INTRINSIC psimd_u16 psimd_splat_u16(uint16_t c) {
return (psimd_u16) { c, c, c, c, c, c, c, c };
}
PSIMD_INTRINSIC psimd_s32 psimd_splat_s32(int32_t c) {
return (psimd_s32) { c, c, c, c };
}
PSIMD_INTRINSIC psimd_u32 psimd_splat_u32(uint32_t c) {
return (psimd_u32) { c, c, c, c };
}
PSIMD_INTRINSIC psimd_f32 psimd_splat_f32(float c) {
return (psimd_f32) { c, c, c, c };
}
/* Load vector */
PSIMD_INTRINSIC psimd_s8 psimd_load_s8(const void* address) {
return *((const psimd_s8*) address);
}
PSIMD_INTRINSIC psimd_u8 psimd_load_u8(const void* address) {
return *((const psimd_u8*) address);
}
PSIMD_INTRINSIC psimd_s16 psimd_load_s16(const void* address) {
return *((const psimd_s16*) address);
}
PSIMD_INTRINSIC psimd_u16 psimd_load_u16(const void* address) {
return *((const psimd_u16*) address);
}
PSIMD_INTRINSIC psimd_s32 psimd_load_s32(const void* address) {
return *((const psimd_s32*) address);
}
PSIMD_INTRINSIC psimd_u32 psimd_load_u32(const void* address) {
return *((const psimd_u32*) address);
}
PSIMD_INTRINSIC psimd_f32 psimd_load_f32(const void* address) {
return *((const psimd_f32*) address);
}
PSIMD_INTRINSIC psimd_s8 psimd_load_splat_s8(const void* address) {
return psimd_splat_s8(*((const int8_t*) address));
}
PSIMD_INTRINSIC psimd_u8 psimd_load_splat_u8(const void* address) {
return psimd_splat_u8(*((const uint8_t*) address));
}
PSIMD_INTRINSIC psimd_s16 psimd_load_splat_s16(const void* address) {
return psimd_splat_s16(*((const int16_t*) address));
}
PSIMD_INTRINSIC psimd_u16 psimd_load_splat_u16(const void* address) {
return psimd_splat_u16(*((const uint16_t*) address));
}
PSIMD_INTRINSIC psimd_s32 psimd_load_splat_s32(const void* address) {
return psimd_splat_s32(*((const int32_t*) address));
}
PSIMD_INTRINSIC psimd_u32 psimd_load_splat_u32(const void* address) {
return psimd_splat_u32(*((const uint32_t*) address));
}
PSIMD_INTRINSIC psimd_f32 psimd_load_splat_f32(const void* address) {
return psimd_splat_f32(*((const float*) address));
}
PSIMD_INTRINSIC psimd_s32 psimd_load1_s32(const void* address) {
return (psimd_s32) { *((const int32_t*) address), 0, 0, 0 };
}
PSIMD_INTRINSIC psimd_u32 psimd_load1_u32(const void* address) {
return (psimd_u32) { *((const uint32_t*) address), 0, 0, 0 };
}
PSIMD_INTRINSIC psimd_f32 psimd_load1_f32(const void* address) {
return (psimd_f32) { *((const float*) address), 0.0f, 0.0f, 0.0f };
}
PSIMD_INTRINSIC psimd_s32 psimd_load2_s32(const void* address) {
const int32_t* address_s32 = (const int32_t*) address;
return (psimd_s32) { address_s32[0], address_s32[1], 0, 0 };
}
PSIMD_INTRINSIC psimd_u32 psimd_load2_u32(const void* address) {
const uint32_t* address_u32 = (const uint32_t*) address;
return (psimd_u32) { address_u32[0], address_u32[1], 0, 0 };
}
PSIMD_INTRINSIC psimd_f32 psimd_load2_f32(const void* address) {
const float* address_f32 = (const float*) address;
return (psimd_f32) { address_f32[0], address_f32[1], 0.0f, 0.0f };
}
PSIMD_INTRINSIC psimd_s32 psimd_load3_s32(const void* address) {
const int32_t* address_s32 = (const int32_t*) address;
return (psimd_s32) { address_s32[0], address_s32[1], address_s32[2], 0 };
}
PSIMD_INTRINSIC psimd_u32 psimd_load3_u32(const void* address) {
const uint32_t* address_u32 = (const uint32_t*) address;
return (psimd_u32) { address_u32[0], address_u32[1], address_u32[2], 0 };
}
PSIMD_INTRINSIC psimd_f32 psimd_load3_f32(const void* address) {
const float* address_f32 = (const float*) address;
return (psimd_f32) { address_f32[0], address_f32[1], address_f32[2], 0.0f };
}
PSIMD_INTRINSIC psimd_s32 psimd_load4_s32(const void* address) {
return psimd_load_s32(address);
}
PSIMD_INTRINSIC psimd_u32 psimd_load4_u32(const void* address) {
return psimd_load_u32(address);
}
PSIMD_INTRINSIC psimd_f32 psimd_load4_f32(const void* address) {
return psimd_load_f32(address);
}
PSIMD_INTRINSIC psimd_f32 psimd_load_stride2_f32(const void* address) {
const psimd_f32 v0x1x = psimd_load_f32(address);
const psimd_f32 vx2x3 = psimd_load_f32((const float*) address + 3);
#if defined(__clang__)
return __builtin_shufflevector(v0x1x, vx2x3, 0, 2, 5, 7);
#else
return __builtin_shuffle(v0x1x, vx2x3, (psimd_s32) { 0, 2, 5, 7 });
#endif
}
PSIMD_INTRINSIC psimd_f32 psimd_load1_stride2_f32(const void* address) {
return psimd_load_f32(address);
}
PSIMD_INTRINSIC psimd_f32 psimd_load2_stride2_f32(const void* address) {
const float* address_f32 = (const float*) address;
return (psimd_f32) { address_f32[0], address_f32[2], 0.0f, 0.0f };
}
PSIMD_INTRINSIC psimd_f32 psimd_load3_stride2_f32(const void* address) {
const psimd_f32 v0x1x = psimd_load_f32(address);
const psimd_f32 v2zzz = psimd_load1_f32((const float*) address + 2);
#if defined(__clang__)
return __builtin_shufflevector(v0x1x, v2zzz, 0, 2, 4, 6);
#else
return __builtin_shuffle(v0x1x, v2zzz, (psimd_s32) { 0, 2, 4, 6 });
#endif
}
PSIMD_INTRINSIC psimd_f32 psimd_load4_stride2_f32(const void* address) {
return psimd_load_stride2_f32(address);
}
PSIMD_INTRINSIC psimd_f32 psimd_load_stride_f32(const void* address, size_t stride) {
const float* address0_f32 = (const float*) address;
const float* address1_f32 = address0_f32 + stride;
const float* address2_f32 = address1_f32 + stride;
const float* address3_f32 = address2_f32 + stride;
return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, *address3_f32 };
}
PSIMD_INTRINSIC psimd_f32 psimd_load1_stride_f32(const void* address, size_t stride) {
return psimd_load1_f32(address);
}
PSIMD_INTRINSIC psimd_f32 psimd_load2_stride_f32(const void* address, size_t stride) {
const float* address_f32 = (const float*) address;
return (psimd_f32) { address_f32[0], address_f32[stride], 0.0f, 0.0f };
}
PSIMD_INTRINSIC psimd_f32 psimd_load3_stride_f32(const void* address, size_t stride) {
const float* address0_f32 = (const float*) address;
const float* address1_f32 = address0_f32 + stride;
const float* address2_f32 = address1_f32 + stride;
return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, 0.0f };
}
PSIMD_INTRINSIC psimd_f32 psimd_load4_stride_f32(const void* address, size_t stride) {
return psimd_load_stride_f32(address, stride);
}
/* Store vector */
PSIMD_INTRINSIC void psimd_store_s8(void* address, psimd_s8 value) {
*((psimd_s8*) address) = value;
}
PSIMD_INTRINSIC void psimd_store_u8(void* address, psimd_u8 value) {
*((psimd_u8*) address) = value;
}
PSIMD_INTRINSIC void psimd_store_s16(void* address, psimd_s16 value) {
*((psimd_s16*) address) = value;
}
PSIMD_INTRINSIC void psimd_store_u16(void* address, psimd_u16 value) {
*((psimd_u16*) address) = value;
}
PSIMD_INTRINSIC void psimd_store_s32(void* address, psimd_s32 value) {
*((psimd_s32*) address) = value;
}
PSIMD_INTRINSIC void psimd_store_u32(void* address, psimd_u32 value) {
*((psimd_u32*) address) = value;
}
PSIMD_INTRINSIC void psimd_store_f32(void* address, psimd_f32 value) {
*((psimd_f32*) address) = value;
}
PSIMD_INTRINSIC void psimd_store1_s32(void* address, psimd_s32 value) {
*((int32_t*) address) = value[0];
}
PSIMD_INTRINSIC void psimd_store1_u32(void* address, psimd_u32 value) {
*((uint32_t*) address) = value[0];
}
PSIMD_INTRINSIC void psimd_store1_f32(void* address, psimd_f32 value) {
*((float*) address) = value[0];
}
PSIMD_INTRINSIC void psimd_store2_s32(void* address, psimd_s32 value) {
int32_t* address_s32 = (int32_t*) address;
address_s32[0] = value[0];
address_s32[1] = value[1];
}
PSIMD_INTRINSIC void psimd_store2_u32(void* address, psimd_u32 value) {
uint32_t* address_u32 = (uint32_t*) address;
address_u32[0] = value[0];
address_u32[1] = value[1];
}
PSIMD_INTRINSIC void psimd_store2_f32(void* address, psimd_f32 value) {
float* address_f32 = (float*) address;
address_f32[0] = value[0];
address_f32[1] = value[1];
}
PSIMD_INTRINSIC void psimd_store3_s32(void* address, psimd_s32 value) {
int32_t* address_s32 = (int32_t*) address;
address_s32[0] = value[0];
address_s32[1] = value[1];
address_s32[2] = value[2];
}
PSIMD_INTRINSIC void psimd_store3_u32(void* address, psimd_u32 value) {
uint32_t* address_u32 = (uint32_t*) address;
address_u32[0] = value[0];
address_u32[1] = value[1];
address_u32[2] = value[2];
}
PSIMD_INTRINSIC void psimd_store3_f32(void* address, psimd_f32 value) {
float* address_f32 = (float*) address;
address_f32[0] = value[0];
address_f32[1] = value[1];
address_f32[2] = value[2];
}
PSIMD_INTRINSIC void psimd_store4_s32(void* address, psimd_s32 value) {
psimd_store_s32(address, value);
}
PSIMD_INTRINSIC void psimd_store4_u32(void* address, psimd_u32 value) {
psimd_store_u32(address, value);
}
PSIMD_INTRINSIC void psimd_store4_f32(void* address, psimd_f32 value) {
psimd_store_f32(address, value);
}
PSIMD_INTRINSIC void psimd_store_stride_f32(void* address, size_t stride, psimd_f32 value) {
float* address0_f32 = (float*) address;
float* address1_f32 = address0_f32 + stride;
float* address2_f32 = address1_f32 + stride;
float* address3_f32 = address2_f32 + stride;
*address0_f32 = value[0];
*address1_f32 = value[1];
*address2_f32 = value[2];
*address3_f32 = value[3];
}
PSIMD_INTRINSIC void psimd_store1_stride_f32(void* address, size_t stride, psimd_f32 value) {
psimd_store1_f32(address, value);
}
PSIMD_INTRINSIC void psimd_store2_stride_f32(void* address, size_t stride, psimd_f32 value) {
float* address_f32 = (float*) address;
address_f32[0] = value[0];
address_f32[stride] = value[1];
}
PSIMD_INTRINSIC void psimd_store3_stride_f32(void* address, size_t stride, psimd_f32 value) {
float* address0_f32 = (float*) address;
float* address1_f32 = address0_f32 + stride;
float* address2_f32 = address1_f32 + stride;
*address0_f32 = value[0];
*address1_f32 = value[1];
*address2_f32 = value[2];
}
/* Vector addition */
PSIMD_INTRINSIC psimd_s8 psimd_add_s8(psimd_s8 a, psimd_s8 b) {
return a + b;
}
PSIMD_INTRINSIC psimd_u8 psimd_add_u8(psimd_u8 a, psimd_u8 b) {
return a + b;
}
PSIMD_INTRINSIC psimd_s16 psimd_add_s16(psimd_s16 a, psimd_s16 b) {
return a + b;
}
PSIMD_INTRINSIC psimd_u16 psimd_add_u16(psimd_u16 a, psimd_u16 b) {
return a + b;
}
PSIMD_INTRINSIC psimd_s32 psimd_add_s32(psimd_s32 a, psimd_s32 b) {
return a + b;
}
PSIMD_INTRINSIC psimd_u32 psimd_add_u32(psimd_u32 a, psimd_u32 b) {
return a + b;
}
PSIMD_INTRINSIC psimd_f32 psimd_add_f32(psimd_f32 a, psimd_f32 b) {
#if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__)
return (psimd_f32) vaddq_f32((float32x4_t) a, (float32x4_t) b);
#else
return a + b;
#endif
}
/* Vector subtraction */
PSIMD_INTRINSIC psimd_s8 psimd_sub_s8(psimd_s8 a, psimd_s8 b) {
return a - b;
}
PSIMD_INTRINSIC psimd_u8 psimd_sub_u8(psimd_u8 a, psimd_u8 b) {
return a - b;
}
PSIMD_INTRINSIC psimd_s16 psimd_sub_s16(psimd_s16 a, psimd_s16 b) {
return a - b;
}
PSIMD_INTRINSIC psimd_u16 psimd_sub_u16(psimd_u16 a, psimd_u16 b) {
return a - b;
}
PSIMD_INTRINSIC psimd_s32 psimd_sub_s32(psimd_s32 a, psimd_s32 b) {
return a - b;
}
PSIMD_INTRINSIC psimd_u32 psimd_sub_u32(psimd_u32 a, psimd_u32 b) {
return a - b;
}
PSIMD_INTRINSIC psimd_f32 psimd_sub_f32(psimd_f32 a, psimd_f32 b) {
#if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__)
return (psimd_f32) vsubq_f32((float32x4_t) a, (float32x4_t) b);
#else
return a - b;
#endif
}
/* Vector multiplication */
PSIMD_INTRINSIC psimd_s8 psimd_mul_s8(psimd_s8 a, psimd_s8 b) {
return a * b;
}
PSIMD_INTRINSIC psimd_u8 psimd_mul_u8(psimd_u8 a, psimd_u8 b) {
return a * b;
}
PSIMD_INTRINSIC psimd_s16 psimd_mul_s16(psimd_s16 a, psimd_s16 b) {
return a * b;
}
PSIMD_INTRINSIC psimd_u16 psimd_mul_u16(psimd_u16 a, psimd_u16 b) {
return a * b;
}
PSIMD_INTRINSIC psimd_s32 psimd_mul_s32(psimd_s32 a, psimd_s32 b) {
return a * b;
}
PSIMD_INTRINSIC psimd_u32 psimd_mul_u32(psimd_u32 a, psimd_u32 b) {
return a * b;
}
PSIMD_INTRINSIC psimd_f32 psimd_mul_f32(psimd_f32 a, psimd_f32 b) {
#if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__)
return (psimd_f32) vmulq_f32((float32x4_t) a, (float32x4_t) b);
#else
return a * b;
#endif
}
/* Quasi-Fused Multiply-Add */
PSIMD_INTRINSIC psimd_f32 psimd_qfma_f32(psimd_f32 a, psimd_f32 b, psimd_f32 c) {
#if defined(__aarch64__) || defined(__ARM_NEON__) && defined(__ARM_FEATURE_FMA)
return (psimd_f32) vfmaq_f32((float32x4_t) a, (float32x4_t) b, (float32x4_t) c);
#elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA__)
return (psimd_f32) _mm_fmadd_ps((__m128) b, (__m128) c, (__m128) a);
#elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA4__)
return (psimd_f32) _mm_macc_ps((__m128) b, (__m128) c, (__m128) a);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) && PSIMD_ENABLE_WASM_QFMA
return (psimd_f32) __builtin_wasm_qfma_f32x4(a, b, c);
#else
return a + b * c;
#endif
}
PSIMD_INTRINSIC psimd_f32 psimd_div_f32(psimd_f32 a, psimd_f32 b) {
return a / b;
}
/* Vector and */
PSIMD_INTRINSIC psimd_f32 psimd_andmask_f32(psimd_s32 mask, psimd_f32 v) {
return (psimd_f32) (mask & (psimd_s32) v);
}
/* Vector and-not */
PSIMD_INTRINSIC psimd_f32 psimd_andnotmask_f32(psimd_s32 mask, psimd_f32 v) {
return (psimd_f32) (~mask & (psimd_s32) v);
}
/* Vector blend */
PSIMD_INTRINSIC psimd_s8 psimd_blend_s8(psimd_s8 mask, psimd_s8 a, psimd_s8 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_s8) vbslq_s8((uint8x16_t) mask, (int8x16_t) a, (int8x16_t) b);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
return (psimd_s8) __builtin_wasm_bitselect(a, b, mask);
#else
return (mask & a) | (~mask & b);
#endif
}
PSIMD_INTRINSIC psimd_u8 psimd_blend_u8(psimd_s8 mask, psimd_u8 a, psimd_u8 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_u8) vbslq_u8((uint8x16_t) mask, (uint8x16_t) a, (uint8x16_t) b);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
return (psimd_u8) __builtin_wasm_bitselect(a, b, mask);
#else
return (psimd_u8) ((mask & (psimd_s8) a) | (~mask & (psimd_s8) b));
#endif
}
PSIMD_INTRINSIC psimd_s16 psimd_blend_s16(psimd_s16 mask, psimd_s16 a, psimd_s16 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_s16) vbslq_s16((uint16x8_t) mask, (int16x8_t) a, (int16x8_t) b);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
return (psimd_s16) __builtin_wasm_bitselect(a, b, mask);
#else
return (mask & a) | (~mask & b);
#endif
}
PSIMD_INTRINSIC psimd_u16 psimd_blend_u16(psimd_s16 mask, psimd_u16 a, psimd_u16 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_u16) vbslq_u16((uint16x8_t) mask, (uint16x8_t) a, (uint16x8_t) b);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
return (psimd_u16) __builtin_wasm_bitselect(a, b, mask);
#else
return (psimd_u16) ((mask & (psimd_s16) a) | (~mask & (psimd_s16) b));
#endif
}
PSIMD_INTRINSIC psimd_s32 psimd_blend_s32(psimd_s32 mask, psimd_s32 a, psimd_s32 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_s32) vbslq_s32((uint32x4_t) mask, (int32x4_t) a, (int32x4_t) b);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
return (psimd_s32) __builtin_wasm_bitselect(a, b, mask);
#else
return (mask & a) | (~mask & b);
#endif
}
PSIMD_INTRINSIC psimd_u32 psimd_blend_u32(psimd_s32 mask, psimd_u32 a, psimd_u32 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_u32) vbslq_u32((uint32x4_t) mask, (uint32x4_t) a, (uint32x4_t) b);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
return (psimd_u32) __builtin_wasm_bitselect(a, b, mask);
#else
return (psimd_u32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b));
#endif
}
PSIMD_INTRINSIC psimd_f32 psimd_blend_f32(psimd_s32 mask, psimd_f32 a, psimd_f32 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_f32) vbslq_f32((uint32x4_t) mask, (float32x4_t) a, (float32x4_t) b);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
return (psimd_f32) __builtin_wasm_bitselect(a, b, mask);
#else
return (psimd_f32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b));
#endif
}
/* Vector blend on sign */
PSIMD_INTRINSIC psimd_s8 psimd_signblend_s8(psimd_s8 x, psimd_s8 a, psimd_s8 b) {
return psimd_blend_s8(x >> psimd_splat_s8(7), a, b);
}
PSIMD_INTRINSIC psimd_u8 psimd_signblend_u8(psimd_s8 x, psimd_u8 a, psimd_u8 b) {
return psimd_blend_u8((x >> psimd_splat_s8(7)), a, b);
}
PSIMD_INTRINSIC psimd_s16 psimd_signblend_s16(psimd_s16 x, psimd_s16 a, psimd_s16 b) {
return psimd_blend_s16(x >> psimd_splat_s16(15), a, b);
}
PSIMD_INTRINSIC psimd_u16 psimd_signblend_u16(psimd_s16 x, psimd_u16 a, psimd_u16 b) {
return psimd_blend_u16((x >> psimd_splat_s16(15)), a, b);
}
PSIMD_INTRINSIC psimd_s32 psimd_signblend_s32(psimd_s32 x, psimd_s32 a, psimd_s32 b) {
return psimd_blend_s32(x >> psimd_splat_s32(31), a, b);
}
PSIMD_INTRINSIC psimd_u32 psimd_signblend_u32(psimd_s32 x, psimd_u32 a, psimd_u32 b) {
return psimd_blend_u32((x >> psimd_splat_s32(31)), a, b);
}
PSIMD_INTRINSIC psimd_f32 psimd_signblend_f32(psimd_f32 x, psimd_f32 a, psimd_f32 b) {
const psimd_s32 mask = (psimd_s32) x >> psimd_splat_s32(31);
return psimd_blend_f32(mask, a, b);
}
/* Vector absolute value */
PSIMD_INTRINSIC psimd_f32 psimd_abs_f32(psimd_f32 v) {
const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f);
return (psimd_f32) ((psimd_s32) v & ~mask);
}
/* Vector negation */
PSIMD_INTRINSIC psimd_f32 psimd_neg_f32(psimd_f32 v) {
const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f);
return (psimd_f32) ((psimd_s32) v ^ mask);
}
/* Vector maximum */
PSIMD_INTRINSIC psimd_s8 psimd_max_s8(psimd_s8 a, psimd_s8 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_s8) vmaxq_s8((int8x16_t) a, (int8x16_t) b);
#else
return psimd_blend_s8(a > b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_u8 psimd_max_u8(psimd_u8 a, psimd_u8 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_u8) vmaxq_u8((uint8x16_t) a, (uint8x16_t) b);
#else
return psimd_blend_u8(a > b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_s16 psimd_max_s16(psimd_s16 a, psimd_s16 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_s16) vmaxq_s16((int16x8_t) a, (int16x8_t) b);
#else
return psimd_blend_s16(a > b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_u16 psimd_max_u16(psimd_u16 a, psimd_u16 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_u16) vmaxq_u16((uint16x8_t) a, (uint16x8_t) b);
#else
return psimd_blend_u16(a > b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_s32 psimd_max_s32(psimd_s32 a, psimd_s32 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_s32) vmaxq_s32((int32x4_t) a, (int32x4_t) b);
#else
return psimd_blend_s32(a > b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_u32 psimd_max_u32(psimd_u32 a, psimd_u32 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_u32) vmaxq_u32((uint32x4_t) a, (uint32x4_t) b);
#else
return psimd_blend_u32(a > b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_f32 psimd_max_f32(psimd_f32 a, psimd_f32 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_f32) vmaxq_f32((float32x4_t) a, (float32x4_t) b);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
return __builtin_wasm_max_f32x4(a, b);
#else
return psimd_blend_f32(a > b, a, b);
#endif
}
/* Vector minimum */
PSIMD_INTRINSIC psimd_s8 psimd_min_s8(psimd_s8 a, psimd_s8 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_s8) vminq_s8((int8x16_t) a, (int8x16_t) b);
#else
return psimd_blend_s8(a < b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_u8 psimd_min_u8(psimd_u8 a, psimd_u8 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_u8) vminq_u8((uint8x16_t) a, (uint8x16_t) b);
#else
return psimd_blend_u8(a < b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_s16 psimd_min_s16(psimd_s16 a, psimd_s16 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_s16) vminq_s16((int16x8_t) a, (int16x8_t) b);
#else
return psimd_blend_s16(a < b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_u16 psimd_min_u16(psimd_u16 a, psimd_u16 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_u16) vminq_u16((uint16x8_t) a, (uint16x8_t) b);
#else
return psimd_blend_u16(a < b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_s32 psimd_min_s32(psimd_s32 a, psimd_s32 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_s32) vminq_s32((int32x4_t) a, (int32x4_t) b);
#else
return psimd_blend_s32(a < b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_u32 psimd_min_u32(psimd_u32 a, psimd_u32 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_u32) vminq_u32((uint32x4_t) a, (uint32x4_t) b);
#else
return psimd_blend_u32(a < b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_f32 psimd_min_f32(psimd_f32 a, psimd_f32 b) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_f32) vminq_f32((float32x4_t) a, (float32x4_t) b);
#elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
return __builtin_wasm_min_f32x4(a, b);
#else
return psimd_blend_f32(a < b, a, b);
#endif
}
PSIMD_INTRINSIC psimd_f32 psimd_cvt_s32_f32(psimd_s32 v) {
#if defined(__clang__)
return __builtin_convertvector(v, psimd_f32);
#elif defined(__ARM_NEON__) || defined(__ARM_NEON)
return (psimd_f32) vcvtq_f32_s32((int32x4_t) v);
#elif defined(__SSE2__)
return (psimd_f32) _mm_cvtepi32_ps((__m128i) v);
#else
return (psimd_f32) { (float) v[0], (float) v[1], (float) v[2], (float) v[3] };
#endif
}
/* Broadcast vector element */
#if defined(__clang__)
PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) {
return __builtin_shufflevector(v, v, 0, 0, 0, 0);
}
PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) {
return __builtin_shufflevector(v, v, 1, 1, 1, 1);
}
PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) {
return __builtin_shufflevector(v, v, 2, 2, 2, 2);
}
PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) {
return __builtin_shufflevector(v, v, 3, 3, 3, 3);
}
#else
PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) {
return __builtin_shuffle(v, (psimd_s32) { 0, 0, 0, 0 });
}
PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) {
return __builtin_shuffle(v, (psimd_s32) { 1, 1, 1, 1 });
}
PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) {
return __builtin_shuffle(v, (psimd_s32) { 2, 2, 2, 2 });
}
PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) {
return __builtin_shuffle(v, (psimd_s32) { 3, 3, 3, 3 });
}
#endif
/* Reversal of vector elements */
#if defined(__clang__)
PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) {
return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
}
PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) {
return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
}
PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) {
return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
}
PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) {
return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
}
PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) {
return __builtin_shufflevector(v, v, 3, 2, 1, 0);
}
PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) {
return __builtin_shufflevector(v, v, 3, 2, 1, 0);
}
PSIMD_INTRINSIC psimd_f32 psimd_reverse_f32(psimd_f32 v) {
return __builtin_shufflevector(v, v, 3, 2, 1, 0);
}
#else
PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) {
return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 });
}
PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) {
return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 });
}
PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) {
return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 });
}
PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) {
return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 });
}
PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) {
return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 });
}
PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) {
return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 });
}
PSIMD_INTRINSIC psimd_f32 psimd_reverse_f32(psimd_f32 v) {
return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 });
}
#endif
/* Interleaving of vector elements */
#if defined(__clang__)
PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
}
PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
}
PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
}
PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
}
PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1);
}
PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3);
}
PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1);
}
PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3);
}
PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1);
}
PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3);
}
#else
PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 });
}
PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 });
}
PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 });
}
PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 });
}
PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 });
}
PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 });
}
PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 });
}
PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 });
}
PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 });
}
PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 });
}
#endif
/* Concatenation of low/high vector elements */
#if defined(__clang__)
PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3);
}
PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7);
}
PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3);
}
PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7);
}
PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1);
}
PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3);
}
PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1);
}
PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3);
}
PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1);
}
PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3);
}
#else
PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 });
}
PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 });
}
PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 });
}
PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 });
}
PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 });
}
PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 });
}
PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 });
}
PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 });
}
PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 });
}
PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 });
}
#endif
/* Concatenation of even/odd vector elements */
#if defined(__clang__)
PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) {
return __builtin_shufflevector(a, b,
0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14);
}
PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) {
return __builtin_shufflevector(a, b,
1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15);
}
PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) {
return __builtin_shufflevector(a, b,
0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14);
}
PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) {
return __builtin_shufflevector(a, b,
1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15);
}
PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6);
}
PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7);
}
PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6);
}
PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7);
}
PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2);
}
PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3);
}
PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2);
}
PSIMD_INTRINSIC psimd_u32 psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3);
}
PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2);
}
PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3);
}
#else
PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) {
return __builtin_shuffle(a, b,
(psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 });
}
PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) {
return __builtin_shuffle(a, b,
(psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 });
}
PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) {
return __builtin_shuffle(a, b,
(psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 });
}
PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) {
return __builtin_shuffle(a, b,
(psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 });
}
PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 });
}
PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 });
}
PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 });
}
PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) {
return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 });
}
PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 });
}
PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 });
}
PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 });
}
PSIMD_INTRINSIC psimd_u32 psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 });
}
PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 });
}
PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) {
return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 });
}
#endif
/* Vector reduce */
#if defined(__clang__)
PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) {
const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, 0, 1);
return temp + __builtin_shufflevector(temp, temp, 1, 0, 3, 2);
}
PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) {
const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1));
return psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2));
}
PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) {
const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1));
return psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2));
}
PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) {
const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, -1, -1);
const psimd_f32 result = temp + __builtin_shufflevector(temp, temp, 1, -1, -1, -1);
return result[0];
}
PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) {
const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1));
const psimd_f32 result = psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1));
return result[0];
}
PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) {
const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1));
const psimd_f32 result = psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1));
return result[0];
}
#else
PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) {
const psimd_f32 temp = v + __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 });
return temp + __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 });
}
PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) {
const psimd_f32 temp = psimd_max_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 }));
return psimd_max_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 }));
}
PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) {
const psimd_f32 temp = psimd_min_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 }));
return psimd_min_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 }));
}
PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) {
const psimd_f32 result = psimd_allreduce_sum_f32(v);
return result[0];
}
PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) {
const psimd_f32 result = psimd_allreduce_max_f32(v);
return result[0];
}
PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) {
const psimd_f32 result = psimd_allreduce_min_f32(v);
return result[0];
}
#endif
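/*
 * Illustrative usage sketch (not part of the original psimd API docs): with a
 * compiler supporting GCC/Clang vector extensions, the reductions above
 * collapse a 4-lane vector to a scalar. The helper name `sum4` is
 * hypothetical.
 *
 *   static float sum4(const float* x) {
 *     const psimd_f32 v = psimd_load_f32(x);  // load 4 consecutive floats
 *     return psimd_reduce_sum_f32(v);         // x[0] + x[1] + x[2] + x[3]
 *   }
 */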
#endif
#endif /* PSIMD_H */
```
|
===================================================================================================================
SOURCE CODE FILE: pthreadpool.h
LINES: 1
SIZE: 99.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pthreadpool.h
ENCODING: utf-8
```h
#ifndef PTHREADPOOL_H_
#define PTHREADPOOL_H_
#include <stddef.h>
#include <stdint.h>
typedef struct pthreadpool* pthreadpool_t;
typedef void (*pthreadpool_task_1d_t)(void*, size_t);
typedef void (*pthreadpool_task_1d_with_thread_t)(void*, size_t, size_t);
typedef void (*pthreadpool_task_1d_tile_1d_t)(void*, size_t, size_t);
typedef void (*pthreadpool_task_2d_t)(void*, size_t, size_t);
typedef void (*pthreadpool_task_2d_with_thread_t)(void*, size_t, size_t, size_t);
typedef void (*pthreadpool_task_2d_tile_1d_t)(void*, size_t, size_t, size_t);
typedef void (*pthreadpool_task_2d_tile_2d_t)(void*, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_3d_t)(void*, size_t, size_t, size_t);
typedef void (*pthreadpool_task_3d_tile_1d_t)(void*, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_3d_tile_1d_with_thread_t)(void*, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_3d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_4d_t)(void*, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_4d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_4d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_5d_t)(void*, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_5d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_5d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_6d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_6d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_6d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_1d_with_id_t)(void*, uint32_t, size_t);
typedef void (*pthreadpool_task_2d_tile_1d_with_id_t)(void*, uint32_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_2d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_3d_tile_1d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_3d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_4d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_2d_tile_1d_with_id_with_thread_t)(void*, uint32_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_3d_tile_1d_with_id_with_thread_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t);
/**
* Disable support for denormalized numbers to the maximum extent possible for
* the duration of the computation.
*
* Handling denormalized floating-point numbers is often implemented in
* microcode, and incurs significant performance degradation. This hint
* instructs the thread pool to disable support for denormalized numbers before
* running the computation by manipulating architecture-specific control
* registers, and restore the initial value of control registers after the
 * computation is complete. The thread pool temporarily disables denormalized
* numbers on all threads involved in the computation (i.e. the caller threads,
* and potentially worker threads).
*
* Disabling denormalized numbers may have a small negative effect on results'
* accuracy. As various architectures differ in capabilities to control
* processing of denormalized numbers, using this flag may also hurt results'
* reproducibility across different instruction set architectures.
*/
#define PTHREADPOOL_FLAG_DISABLE_DENORMALS 0x00000001
/**
* Yield worker threads to the system scheduler after the operation is finished.
*
* Force workers to use kernel wait (instead of active spin-wait by default) for
* new commands after this command is processed. This flag affects only the
* immediate next operation on this thread pool. To make the thread pool always
* use kernel wait, pass this flag to all parallelization functions.
*/
#define PTHREADPOOL_FLAG_YIELD_WORKERS 0x00000002
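/*
 * Illustrative sketch (an addition, not upstream text): the flags argument of
 * every parallelization function is a bitwise OR of the PTHREADPOOL_FLAG_*
 * values above, or 0 for default behaviour. For example, to disable denormals
 * for one computation and park the workers in a kernel wait afterwards:
 *
 *   const uint32_t flags =
 *       PTHREADPOOL_FLAG_DISABLE_DENORMALS | PTHREADPOOL_FLAG_YIELD_WORKERS;
 *   pthreadpool_parallelize_1d(threadpool, task, context, range, flags);
 */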
#ifdef __cplusplus
extern "C" {
#endif
/**
* Create a thread pool with the specified number of threads.
*
* @param threads_count the number of threads in the thread pool.
* A value of 0 has special interpretation: it creates a thread pool with as
* many threads as there are logical processors in the system.
*
* @returns A pointer to an opaque thread pool object if the call is
* successful, or NULL pointer if the call failed.
*/
pthreadpool_t pthreadpool_create(size_t threads_count);
/**
* Query the number of threads in a thread pool.
*
* @param threadpool the thread pool to query.
*
* @returns The number of threads in the thread pool.
*/
size_t pthreadpool_get_threads_count(pthreadpool_t threadpool);
/**
* Process items on a 1D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range; i++)
* function(context, i);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each item.
* @param context the first argument passed to the specified function.
* @param range the number of items on the 1D grid to process. The
* specified function will be called once for each item.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_1d(
pthreadpool_t threadpool,
pthreadpool_task_1d_t function,
void* context,
size_t range,
uint32_t flags);
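/*
 * Illustrative usage sketch (hypothetical names `scale_ctx`/`scale_task`):
 * scaling every element of an array, one item per callback invocation.
 *
 *   struct scale_ctx { float* data; float factor; };
 *
 *   static void scale_task(void* context, size_t i) {
 *     struct scale_ctx* ctx = (struct scale_ctx*) context;
 *     ctx->data[i] *= ctx->factor;  // each index i is visited exactly once
 *   }
 *
 *   struct scale_ctx ctx = { data, 2.0f };
 *   pthreadpool_parallelize_1d(threadpool, scale_task, &ctx, n, 0);
 */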
/**
* Process items on a 1D grid passing along the current thread id.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range; i++)
* function(context, thread_index, i);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each item.
* @param context the first argument passed to the specified function.
* @param range the number of items on the 1D grid to process. The
* specified function will be called once for each item.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_1d_with_thread(
pthreadpool_t threadpool,
pthreadpool_task_1d_with_thread_t function,
void* context,
size_t range,
uint32_t flags);
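/*
 * Illustrative sketch (hypothetical names): the thread index enables
 * per-thread accumulators without atomics; the partial results are combined
 * after the call returns. The `partials` array must hold at least
 * pthreadpool_get_threads_count(threadpool) entries.
 *
 *   struct sum_ctx { const float* data; double* partials; };
 *
 *   static void sum_task(void* context, size_t thread_index, size_t i) {
 *     struct sum_ctx* ctx = (struct sum_ctx*) context;
 *     ctx->partials[thread_index] += ctx->data[i];  // no cross-thread writes
 *   }
 */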
/**
* Process items on a 1D grid using a microarchitecture-aware task function.
*
* The function implements a parallel version of the following snippet:
*
* uint32_t uarch_index = cpuinfo_initialize() ?
* cpuinfo_get_current_uarch_index() : default_uarch_index;
* if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
* for (size_t i = 0; i < range; i++)
* function(context, uarch_index, i);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If
* threadpool is NULL, all items are processed serially on the calling
* thread.
* @param function the function to call for each item.
* @param context the first argument passed to the specified
* function.
* @param default_uarch_index the microarchitecture index to use when
* pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
* or index returned by cpuinfo_get_current_uarch_index() exceeds the
* max_uarch_index value.
* @param max_uarch_index the maximum microarchitecture index expected by
* the specified function. If the index returned by
* cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
* will be used instead. default_uarch_index can exceed max_uarch_index.
* @param range the number of items on the 1D grid to process.
* The specified function will be called once for each item.
* @param flags a bitwise combination of zero or more optional
* flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
* PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_1d_with_uarch(
pthreadpool_t threadpool,
pthreadpool_task_1d_with_id_t function,
void* context,
uint32_t default_uarch_index,
uint32_t max_uarch_index,
size_t range,
uint32_t flags);
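/*
 * Illustrative sketch (hypothetical kernel table): the uarch index selects
 * between per-microarchitecture implementations. Here index 0 is the generic
 * fallback and index 1 an optimized variant, so default_uarch_index = 0 and
 * max_uarch_index = 1.
 *
 *   typedef void (*kernel_fn)(float* data, size_t i);
 *   static const kernel_fn kernels[2] = { kernel_generic, kernel_fast };
 *
 *   static void dispatch_task(void* context, uint32_t uarch_index, size_t i) {
 *     kernels[uarch_index]((float*) context, i);  // uarch_index <= 1 here
 *   }
 *
 *   pthreadpool_parallelize_1d_with_uarch(
 *       threadpool, dispatch_task, data, 0, 1, n, 0);
 */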
/**
* Process items on a 1D grid with specified maximum tile size.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range; i += tile)
* function(context, i, min(range - i, tile));
*
* When the call returns, all items have been processed and the thread pool is
* ready for a new task.
*
* @note If multiple threads call this function with the same thread pool,
* the calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range the number of items on the 1D grid to process.
* @param tile the maximum number of items on the 1D grid to process in
* one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_1d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_1d_tile_1d_t function,
void* context,
size_t range,
size_t tile,
uint32_t flags);
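/*
 * Illustrative sketch (hypothetical names): tiling amortizes dispatch
 * overhead by handing each callback a contiguous chunk of up to `tile`
 * items; only the final chunk may be shorter.
 *
 *   static void fill_task(void* context, size_t start, size_t count) {
 *     float* data = (float*) context;
 *     for (size_t i = start; i < start + count; i++)
 *       data[i] = 0.0f;  // count <= tile
 *   }
 *
 *   pthreadpool_parallelize_1d_tile_1d(threadpool, fill_task, data, n, 64, 0);
 */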
/**
* Process items on a 2D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* function(context, i, j);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each item.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 2D grid.
* @param range_j the number of items to process along the second dimension
* of the 2D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_2d(
pthreadpool_t threadpool,
pthreadpool_task_2d_t function,
void* context,
size_t range_i,
size_t range_j,
uint32_t flags);
/**
* Process items on a 2D grid passing along the current thread id.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* function(context, thread_index, i, j);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each item.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 2D grid.
* @param range_j the number of items to process along the second dimension
* of the 2D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_2d_with_thread(
pthreadpool_t threadpool,
pthreadpool_task_2d_with_thread_t function,
void* context,
size_t range_i,
size_t range_j,
uint32_t flags);
/**
* Process items on a 2D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j += tile_j)
* function(context, i, j, min(range_j - j, tile_j));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 2D grid.
* @param range_j the number of items to process along the second dimension
* of the 2D grid.
* @param tile_j the maximum number of items along the second dimension of
* the 2D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_2d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_2d_tile_1d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t tile_j,
uint32_t flags);
/**
* Process items on a 2D grid with the specified maximum tile size along the
* last grid dimension using a microarchitecture-aware task function.
*
* The function implements a parallel version of the following snippet:
*
* uint32_t uarch_index = cpuinfo_initialize() ?
* cpuinfo_get_current_uarch_index() : default_uarch_index;
* if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j += tile_j)
* function(context, uarch_index, i, j, min(range_j - j, tile_j));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param default_uarch_index the microarchitecture index to use when
* pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
* or index returned by cpuinfo_get_current_uarch_index() exceeds the
* max_uarch_index value.
* @param max_uarch_index the maximum microarchitecture index expected by
* the specified function. If the index returned by
* cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
* will be used instead. default_uarch_index can exceed max_uarch_index.
* @param range_i the number of items to process along the first dimension
* of the 2D grid.
* @param range_j the number of items to process along the second dimension
* of the 2D grid.
* @param tile_j the maximum number of items along the second dimension of
* the 2D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_2d_tile_1d_with_uarch(
pthreadpool_t threadpool,
pthreadpool_task_2d_tile_1d_with_id_t function,
void* context,
uint32_t default_uarch_index,
uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t tile_j,
uint32_t flags);
/**
* Process items on a 2D grid with the specified maximum tile size along the
* last grid dimension using a microarchitecture-aware task function and passing
* along the current thread id.
*
* The function implements a parallel version of the following snippet:
*
* uint32_t uarch_index = cpuinfo_initialize() ?
* cpuinfo_get_current_uarch_index() : default_uarch_index;
* if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j += tile_j)
* function(context, uarch_index, thread_index, i, j, min(range_j - j, tile_j));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param default_uarch_index the microarchitecture index to use when
* pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
* or index returned by cpuinfo_get_current_uarch_index() exceeds the
* max_uarch_index value.
* @param max_uarch_index the maximum microarchitecture index expected by
* the specified function. If the index returned by
* cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
* will be used instead. default_uarch_index can exceed max_uarch_index.
* @param range_i the number of items to process along the first dimension
* of the 2D grid.
* @param range_j the number of items to process along the second dimension
* of the 2D grid.
* @param tile_j the maximum number of items along the second dimension of
* the 2D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_2d_tile_1d_with_uarch_with_thread(
pthreadpool_t threadpool,
pthreadpool_task_2d_tile_1d_with_id_with_thread_t function,
void* context,
uint32_t default_uarch_index,
uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t tile_j,
uint32_t flags);
/**
* Process items on a 2D grid with the specified maximum tile size along each
* grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i += tile_i)
* for (size_t j = 0; j < range_j; j += tile_j)
* function(context, i, j,
* min(range_i - i, tile_i), min(range_j - j, tile_j));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 2D grid.
* @param range_j the number of items to process along the second dimension
* of the 2D grid.
 * @param tile_i the maximum number of items along the first dimension of
* the 2D grid to process in one function call.
* @param tile_j the maximum number of items along the second dimension of
* the 2D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_2d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_2d_tile_2d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t tile_i,
size_t tile_j,
uint32_t flags);
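/*
 * Illustrative sketch (hypothetical names): 2D tiling maps naturally onto
 * blocked matrix traversal; each callback receives the top-left corner of a
 * block plus its extents, which are clipped at the matrix edges.
 *
 *   struct mat_ctx { float* a; size_t cols; };
 *
 *   static void block_task(void* context, size_t i, size_t j,
 *                          size_t bi, size_t bj) {
 *     struct mat_ctx* ctx = (struct mat_ctx*) context;
 *     for (size_t ii = i; ii < i + bi; ii++)
 *       for (size_t jj = j; jj < j + bj; jj++)
 *         ctx->a[ii * ctx->cols + jj] += 1.0f;  // touch one bi-by-bj block
 *   }
 *
 *   pthreadpool_parallelize_2d_tile_2d(
 *       threadpool, block_task, &ctx, rows, cols, 32, 32, 0);
 */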
/**
* Process items on a 2D grid with the specified maximum tile size along each
* grid dimension using a microarchitecture-aware task function.
*
* The function implements a parallel version of the following snippet:
*
* uint32_t uarch_index = cpuinfo_initialize() ?
* cpuinfo_get_current_uarch_index() : default_uarch_index;
* if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
* for (size_t i = 0; i < range_i; i += tile_i)
* for (size_t j = 0; j < range_j; j += tile_j)
* function(context, uarch_index, i, j,
* min(range_i - i, tile_i), min(range_j - j, tile_j));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If
* threadpool is NULL, all items are processed serially on the calling
* thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified
* function.
* @param default_uarch_index the microarchitecture index to use when
* pthreadpool is configured without cpuinfo,
* cpuinfo initialization failed, or index returned
* by cpuinfo_get_current_uarch_index() exceeds
* the max_uarch_index value.
* @param max_uarch_index the maximum microarchitecture index expected
* by the specified function. If the index returned
* by cpuinfo_get_current_uarch_index() exceeds this
* value, default_uarch_index will be used instead.
* default_uarch_index can exceed max_uarch_index.
* @param range_i the number of items to process along the first
* dimension of the 2D grid.
* @param range_j the number of items to process along the second
* dimension of the 2D grid.
 * @param tile_i               the maximum number of items along the first
* dimension of the 2D grid to process in one function call.
* @param tile_j the maximum number of items along the second
* dimension of the 2D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional
* flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
* PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_2d_tile_2d_with_uarch(
pthreadpool_t threadpool,
pthreadpool_task_2d_tile_2d_with_id_t function,
void* context,
uint32_t default_uarch_index,
uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t tile_i,
size_t tile_j,
uint32_t flags);
/**
* Process items on a 3D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* function(context, i, j, k);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
 * @param function the function to call for each item.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 3D grid.
* @param range_j the number of items to process along the second dimension
* of the 3D grid.
* @param range_k the number of items to process along the third dimension
* of the 3D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_3d(
pthreadpool_t threadpool,
pthreadpool_task_3d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
uint32_t flags);
/**
* Process items on a 3D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k += tile_k)
* function(context, i, j, k, min(range_k - k, tile_k));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 3D grid.
* @param range_j the number of items to process along the second dimension
* of the 3D grid.
* @param range_k the number of items to process along the third dimension
* of the 3D grid.
* @param tile_k the maximum number of items along the third dimension of
* the 3D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_3d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_1d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_k,
uint32_t flags);
/**
* Process items on a 3D grid with the specified maximum tile size along the
* last grid dimension and passing along the current thread id.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k += tile_k)
* function(context, thread_index, i, j, k, min(range_k - k, tile_k));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 3D grid.
* @param range_j the number of items to process along the second dimension
* of the 3D grid.
* @param range_k the number of items to process along the third dimension
* of the 3D grid.
* @param tile_k the maximum number of items along the third dimension of
* the 3D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_3d_tile_1d_with_thread(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_1d_with_thread_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_k,
uint32_t flags);
/**
* Process items on a 3D grid with the specified maximum tile size along the
* last grid dimension using a microarchitecture-aware task function.
*
* The function implements a parallel version of the following snippet:
*
* uint32_t uarch_index = cpuinfo_initialize() ?
* cpuinfo_get_current_uarch_index() : default_uarch_index;
* if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k += tile_k)
* function(context, uarch_index, i, j, k, min(range_k - k, tile_k));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If
* threadpool is NULL, all items are processed serially on the calling
* thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified
* function.
* @param default_uarch_index the microarchitecture index to use when
* pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
* or index returned by cpuinfo_get_current_uarch_index() exceeds the
* max_uarch_index value.
* @param max_uarch_index the maximum microarchitecture index expected by
* the specified function. If the index returned by
* cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
* will be used instead. default_uarch_index can exceed max_uarch_index.
* @param range_i the number of items to process along the first
* dimension of the 3D grid.
* @param range_j the number of items to process along the second
* dimension of the 3D grid.
* @param range_k the number of items to process along the third
* dimension of the 3D grid.
* @param tile_k the maximum number of items along the third
* dimension of the 3D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional
* flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
* PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_3d_tile_1d_with_uarch(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_1d_with_id_t function,
void* context,
uint32_t default_uarch_index,
uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_k,
uint32_t flags);
/**
* Process items on a 3D grid with the specified maximum tile size along the
* last grid dimension using a microarchitecture-aware task function and passing
* along the current thread id.
*
* The function implements a parallel version of the following snippet:
*
* uint32_t uarch_index = cpuinfo_initialize() ?
* cpuinfo_get_current_uarch_index() : default_uarch_index;
* if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k += tile_k)
* function(context, uarch_index, thread_index, i, j, k, min(range_k - k, tile_k));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If
* threadpool is NULL, all items are processed serially on the calling
* thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified
* function.
* @param default_uarch_index the microarchitecture index to use when
* pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
* or index returned by cpuinfo_get_current_uarch_index() exceeds the
* max_uarch_index value.
* @param max_uarch_index the maximum microarchitecture index expected by
* the specified function. If the index returned by
* cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
* will be used instead. default_uarch_index can exceed max_uarch_index.
* @param range_i the number of items to process along the first
* dimension of the 3D grid.
* @param range_j the number of items to process along the second
* dimension of the 3D grid.
* @param range_k the number of items to process along the third
* dimension of the 3D grid.
* @param tile_k the maximum number of items along the third
* dimension of the 3D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional
* flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
* PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_3d_tile_1d_with_uarch_with_thread(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_1d_with_id_with_thread_t function,
void* context,
uint32_t default_uarch_index,
uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_k,
uint32_t flags);
/**
* Process items on a 3D grid with the specified maximum tile size along the
* last two grid dimensions.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j += tile_j)
* for (size_t k = 0; k < range_k; k += tile_k)
* function(context, i, j, k,
* min(range_j - j, tile_j), min(range_k - k, tile_k));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 3D grid.
* @param range_j the number of items to process along the second dimension
* of the 3D grid.
* @param range_k the number of items to process along the third dimension
* of the 3D grid.
* @param tile_j the maximum number of items along the second dimension of
* the 3D grid to process in one function call.
* @param tile_k the maximum number of items along the third dimension of
* the 3D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_3d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_2d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_j,
size_t tile_k,
uint32_t flags);
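/*
 * Illustrative sketch (hypothetical names): a common use is batched image
 * processing, iterating images along the first dimension with spatial tiles
 * over the last two.
 *
 *   static void tile_task(void* context, size_t img, size_t y, size_t x,
 *                         size_t th, size_t tw) {
 *     // process the th-by-tw tile of image `img` starting at row y, col x
 *   }
 *
 *   pthreadpool_parallelize_3d_tile_2d(
 *       threadpool, tile_task, images, batch, height, width, 16, 16, 0);
 */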
/**
* Process items on a 3D grid with the specified maximum tile size along the
* last two grid dimensions using a microarchitecture-aware task function.
*
* The function implements a parallel version of the following snippet:
*
* uint32_t uarch_index = cpuinfo_initialize() ?
* cpuinfo_get_current_uarch_index() : default_uarch_index;
* if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j += tile_j)
* for (size_t k = 0; k < range_k; k += tile_k)
* function(context, uarch_index, i, j, k,
* min(range_j - j, tile_j), min(range_k - k, tile_k));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If
* threadpool is NULL, all items are processed serially on the calling
* thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified
* function.
* @param default_uarch_index the microarchitecture index to use when
* pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
* or index returned by cpuinfo_get_current_uarch_index() exceeds the
* max_uarch_index value.
* @param max_uarch_index the maximum microarchitecture index expected by
* the specified function. If the index returned by
* cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
* will be used instead. default_uarch_index can exceed max_uarch_index.
* @param range_i the number of items to process along the first
* dimension of the 3D grid.
* @param range_j the number of items to process along the second
* dimension of the 3D grid.
* @param range_k the number of items to process along the third
* dimension of the 3D grid.
* @param tile_j the maximum number of items along the second
* dimension of the 3D grid to process in one function call.
* @param tile_k the maximum number of items along the third
* dimension of the 3D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional
* flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
* PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_3d_tile_2d_with_uarch(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_2d_with_id_t function,
void* context,
uint32_t default_uarch_index,
uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_j,
size_t tile_k,
uint32_t flags);
/**
* Process items on a 4D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* function(context, i, j, k, l);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
 * @param function the function to call for each item.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 4D grid.
* @param range_j the number of items to process along the second dimension
* of the 4D grid.
* @param range_k the number of items to process along the third dimension
* of the 4D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 4D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_4d(
pthreadpool_t threadpool,
pthreadpool_task_4d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
uint32_t flags);
/**
* Process items on a 4D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l += tile_l)
* function(context, i, j, k, l, min(range_l - l, tile_l));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 4D grid.
* @param range_j the number of items to process along the second dimension
* of the 4D grid.
* @param range_k the number of items to process along the third dimension
* of the 4D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 4D grid.
* @param tile_l the maximum number of items along the fourth dimension of
* the 4D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_4d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_4d_tile_1d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t tile_l,
uint32_t flags);
/**
* Process items on a 4D grid with the specified maximum tile size along the
* last two grid dimensions.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k += tile_k)
* for (size_t l = 0; l < range_l; l += tile_l)
* function(context, i, j, k, l,
* min(range_k - k, tile_k), min(range_l - l, tile_l));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 4D grid.
* @param range_j the number of items to process along the second dimension
* of the 4D grid.
* @param range_k the number of items to process along the third dimension
* of the 4D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 4D grid.
* @param tile_k the maximum number of items along the third dimension of
* the 4D grid to process in one function call.
* @param tile_l the maximum number of items along the fourth dimension of
* the 4D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_4d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_4d_tile_2d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t tile_k,
size_t tile_l,
uint32_t flags);
/**
* Process items on a 4D grid with the specified maximum tile size along the
* last two grid dimensions using a microarchitecture-aware task function.
*
* The function implements a parallel version of the following snippet:
*
* uint32_t uarch_index = cpuinfo_initialize() ?
* cpuinfo_get_current_uarch_index() : default_uarch_index;
* if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k += tile_k)
* for (size_t l = 0; l < range_l; l += tile_l)
* function(context, uarch_index, i, j, k, l,
* min(range_k - k, tile_k), min(range_l - l, tile_l));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If
* threadpool is NULL, all items are processed serially on the calling
* thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified
* function.
* @param default_uarch_index the microarchitecture index to use when
* pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
* or index returned by cpuinfo_get_current_uarch_index() exceeds the
* max_uarch_index value.
* @param max_uarch_index the maximum microarchitecture index expected by
* the specified function. If the index returned by
* cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
* will be used instead. default_uarch_index can exceed max_uarch_index.
* @param range_i the number of items to process along the first
* dimension of the 4D grid.
* @param range_j the number of items to process along the second
* dimension of the 4D grid.
* @param range_k the number of items to process along the third
* dimension of the 4D grid.
* @param range_l the number of items to process along the fourth
* dimension of the 4D grid.
* @param tile_k the maximum number of items along the third
* dimension of the 4D grid to process in one function call.
* @param tile_l the maximum number of items along the fourth
* dimension of the 4D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional
* flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
* PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_4d_tile_2d_with_uarch(
pthreadpool_t threadpool,
pthreadpool_task_4d_tile_2d_with_id_t function,
void* context,
uint32_t default_uarch_index,
uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t tile_k,
size_t tile_l,
uint32_t flags);
/**
* Process items on a 5D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m++)
* function(context, i, j, k, l, m);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
 * @param function the function to call for each item.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 5D grid.
* @param range_j the number of items to process along the second dimension
* of the 5D grid.
* @param range_k the number of items to process along the third dimension
* of the 5D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 5D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 5D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_5d(
pthreadpool_t threadpool,
pthreadpool_task_5d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
uint32_t flags);
/**
* Process items on a 5D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m += tile_m)
* function(context, i, j, k, l, m, min(range_m - m, tile_m));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 5D grid.
* @param range_j the number of items to process along the second dimension
* of the 5D grid.
* @param range_k the number of items to process along the third dimension
* of the 5D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 5D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 5D grid.
* @param tile_m the maximum number of items along the fifth dimension of
* the 5D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_5d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_5d_tile_1d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t tile_m,
uint32_t flags);
/**
* Process items on a 5D grid with the specified maximum tile size along the
* last two grid dimensions.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l += tile_l)
* for (size_t m = 0; m < range_m; m += tile_m)
* function(context, i, j, k, l, m,
* min(range_l - l, tile_l), min(range_m - m, tile_m));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 5D grid.
* @param range_j the number of items to process along the second dimension
* of the 5D grid.
* @param range_k the number of items to process along the third dimension
* of the 5D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 5D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 5D grid.
* @param tile_l the maximum number of items along the fourth dimension of
* the 5D grid to process in one function call.
* @param tile_m the maximum number of items along the fifth dimension of
* the 5D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_5d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_5d_tile_2d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t tile_l,
size_t tile_m,
uint32_t flags);
/**
* Process items on a 6D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m++)
* for (size_t n = 0; n < range_n; n++)
* function(context, i, j, k, l, m, n);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
 * @param function the function to call for each item.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 6D grid.
* @param range_j the number of items to process along the second dimension
* of the 6D grid.
* @param range_k the number of items to process along the third dimension
* of the 6D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 6D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 6D grid.
* @param range_n the number of items to process along the sixth dimension
* of the 6D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_6d(
pthreadpool_t threadpool,
pthreadpool_task_6d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t range_n,
uint32_t flags);
/**
* Process items on a 6D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m++)
* for (size_t n = 0; n < range_n; n += tile_n)
* function(context, i, j, k, l, m, n, min(range_n - n, tile_n));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 6D grid.
* @param range_j the number of items to process along the second dimension
* of the 6D grid.
* @param range_k the number of items to process along the third dimension
* of the 6D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 6D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 6D grid.
* @param range_n the number of items to process along the sixth dimension
* of the 6D grid.
* @param tile_n the maximum number of items along the sixth dimension of
* the 6D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_6d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_6d_tile_1d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t range_n,
size_t tile_n,
uint32_t flags);
/**
* Process items on a 6D grid with the specified maximum tile size along the
* last two grid dimensions.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m += tile_m)
* for (size_t n = 0; n < range_n; n += tile_n)
* function(context, i, j, k, l, m, n,
* min(range_m - m, tile_m), min(range_n - n, tile_n));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param function the function to call for each tile.
* @param context the first argument passed to the specified function.
* @param range_i the number of items to process along the first dimension
* of the 6D grid.
* @param range_j the number of items to process along the second dimension
* of the 6D grid.
* @param range_k the number of items to process along the third dimension
* of the 6D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 6D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 6D grid.
* @param range_n the number of items to process along the sixth dimension
* of the 6D grid.
* @param tile_m the maximum number of items along the fifth dimension of
* the 6D grid to process in one function call.
* @param tile_n the maximum number of items along the sixth dimension of
* the 6D grid to process in one function call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_6d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_6d_tile_2d_t function,
void* context,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t range_n,
size_t tile_m,
size_t tile_n,
uint32_t flags);
/**
* Terminates threads in the thread pool and releases associated resources.
*
* @warning Accessing the thread pool after a call to this function constitutes
* undefined behaviour and may cause data corruption.
*
* @param[in,out] threadpool The thread pool to destroy.
*/
void pthreadpool_destroy(pthreadpool_t threadpool);
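/* Illustrative example (a sketch, not part of the upstream header): driving
 * the plain C API with a task function and a context struct. The names
 * `add_context` and `add_item` are invented for illustration;
 * pthreadpool_create() is declared earlier in this header.
 *
 *     struct add_context { const float* a; const float* b; float* c; };
 *
 *     static void add_item(void* arg, size_t i) {
 *         struct add_context* ctx = (struct add_context*) arg;
 *         ctx->c[i] = ctx->a[i] + ctx->b[i];
 *     }
 *
 *     pthreadpool_t pool = pthreadpool_create(0); // 0 = one thread per core
 *     struct add_context ctx = {a, b, c};
 *     pthreadpool_parallelize_1d(pool, add_item, &ctx, n, 0);
 *     pthreadpool_destroy(pool);
 */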
#ifndef PTHREADPOOL_NO_DEPRECATED_API
/* Legacy API for compatibility with pre-existing users (e.g. NNPACK) */
#if defined(__GNUC__)
#define PTHREADPOOL_DEPRECATED __attribute__((__deprecated__))
#else
#define PTHREADPOOL_DEPRECATED
#endif
typedef void (*pthreadpool_function_1d_t)(void*, size_t);
typedef void (*pthreadpool_function_1d_tiled_t)(void*, size_t, size_t);
typedef void (*pthreadpool_function_2d_t)(void*, size_t, size_t);
typedef void (*pthreadpool_function_2d_tiled_t)(void*, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_function_3d_tiled_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_function_4d_tiled_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
void pthreadpool_compute_1d(
pthreadpool_t threadpool,
pthreadpool_function_1d_t function,
void* argument,
size_t range) PTHREADPOOL_DEPRECATED;
void pthreadpool_compute_1d_tiled(
pthreadpool_t threadpool,
pthreadpool_function_1d_tiled_t function,
void* argument,
size_t range,
size_t tile) PTHREADPOOL_DEPRECATED;
void pthreadpool_compute_2d(
pthreadpool_t threadpool,
pthreadpool_function_2d_t function,
void* argument,
size_t range_i,
size_t range_j) PTHREADPOOL_DEPRECATED;
void pthreadpool_compute_2d_tiled(
pthreadpool_t threadpool,
pthreadpool_function_2d_tiled_t function,
void* argument,
size_t range_i,
size_t range_j,
size_t tile_i,
size_t tile_j) PTHREADPOOL_DEPRECATED;
void pthreadpool_compute_3d_tiled(
pthreadpool_t threadpool,
pthreadpool_function_3d_tiled_t function,
void* argument,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_i,
size_t tile_j,
size_t tile_k) PTHREADPOOL_DEPRECATED;
void pthreadpool_compute_4d_tiled(
pthreadpool_t threadpool,
pthreadpool_function_4d_tiled_t function,
void* argument,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t tile_i,
size_t tile_j,
size_t tile_k,
size_t tile_l) PTHREADPOOL_DEPRECATED;
#endif /* PTHREADPOOL_NO_DEPRECATED_API */
#ifdef __cplusplus
} /* extern "C" */
#endif
#ifdef __cplusplus
namespace libpthreadpool {
namespace detail {
namespace {
template<class T>
void call_wrapper_1d(void* arg, size_t i) {
(*static_cast<const T*>(arg))(i);
}
template<class T>
void call_wrapper_1d_tile_1d(void* arg, size_t range_i, size_t tile_i) {
(*static_cast<const T*>(arg))(range_i, tile_i);
}
template<class T>
void call_wrapper_2d(void* functor, size_t i, size_t j) {
(*static_cast<const T*>(functor))(i, j);
}
template<class T>
void call_wrapper_2d_tile_1d(void* functor,
size_t i, size_t range_j, size_t tile_j)
{
(*static_cast<const T*>(functor))(i, range_j, tile_j);
}
template<class T>
void call_wrapper_2d_tile_2d(void* functor,
size_t range_i, size_t range_j,
size_t tile_i, size_t tile_j)
{
(*static_cast<const T*>(functor))(range_i, range_j, tile_i, tile_j);
}
template<class T>
void call_wrapper_3d(void* functor, size_t i, size_t j, size_t k) {
(*static_cast<const T*>(functor))(i, j, k);
}
template<class T>
void call_wrapper_3d_tile_1d(void* functor,
size_t i, size_t j, size_t range_k,
size_t tile_k)
{
(*static_cast<const T*>(functor))(i, j, range_k, tile_k);
}
template<class T>
void call_wrapper_3d_tile_2d(void* functor,
size_t i, size_t range_j, size_t range_k,
size_t tile_j, size_t tile_k)
{
(*static_cast<const T*>(functor))(i, range_j, range_k, tile_j, tile_k);
}
template<class T>
void call_wrapper_4d(void* functor, size_t i, size_t j, size_t k, size_t l) {
(*static_cast<const T*>(functor))(i, j, k, l);
}
template<class T>
void call_wrapper_4d_tile_1d(void* functor,
size_t i, size_t j, size_t k, size_t range_l,
size_t tile_l)
{
(*static_cast<const T*>(functor))(i, j, k, range_l, tile_l);
}
template<class T>
void call_wrapper_4d_tile_2d(void* functor,
size_t i, size_t j, size_t range_k, size_t range_l,
size_t tile_k, size_t tile_l)
{
(*static_cast<const T*>(functor))(i, j, range_k, range_l, tile_k, tile_l);
}
template<class T>
void call_wrapper_5d(void* functor, size_t i, size_t j, size_t k, size_t l, size_t m) {
(*static_cast<const T*>(functor))(i, j, k, l, m);
}
template<class T>
void call_wrapper_5d_tile_1d(void* functor,
size_t i, size_t j, size_t k, size_t l, size_t range_m,
size_t tile_m)
{
(*static_cast<const T*>(functor))(i, j, k, l, range_m, tile_m);
}
template<class T>
void call_wrapper_5d_tile_2d(void* functor,
size_t i, size_t j, size_t k, size_t range_l, size_t range_m,
size_t tile_l, size_t tile_m)
{
(*static_cast<const T*>(functor))(i, j, k, range_l, range_m, tile_l, tile_m);
}
template<class T>
void call_wrapper_6d(void* functor, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) {
(*static_cast<const T*>(functor))(i, j, k, l, m, n);
}
template<class T>
void call_wrapper_6d_tile_1d(void* functor,
size_t i, size_t j, size_t k, size_t l, size_t m, size_t range_n,
size_t tile_n)
{
(*static_cast<const T*>(functor))(i, j, k, l, m, range_n, tile_n);
}
template<class T>
void call_wrapper_6d_tile_2d(void* functor,
size_t i, size_t j, size_t k, size_t l, size_t range_m, size_t range_n,
size_t tile_m, size_t tile_n)
{
(*static_cast<const T*>(functor))(i, j, k, l, range_m, range_n, tile_m, tile_n);
}
} /* namespace */
} /* namespace detail */
} /* namespace libpthreadpool */
/**
* Process items on a 1D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range; i++)
* functor(i);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each item.
* @param range the number of items on the 1D grid to process. The
* specified functor will be called once for each item.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_1d(
pthreadpool_t threadpool,
const T& functor,
size_t range,
uint32_t flags = 0)
{
pthreadpool_parallelize_1d(
threadpool,
&libpthreadpool::detail::call_wrapper_1d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range,
flags);
}
/**
* Process items on a 1D grid with specified maximum tile size.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range; i += tile)
* functor(i, min(range - i, tile));
*
* When the call returns, all items have been processed and the thread pool is
* ready for a new task.
*
* @note If multiple threads call this function with the same thread pool,
* the calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range the number of items on the 1D grid to process.
* @param tile the maximum number of items on the 1D grid to process in
* one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_1d_tile_1d(
pthreadpool_t threadpool,
const T& functor,
size_t range,
size_t tile,
uint32_t flags = 0)
{
pthreadpool_parallelize_1d_tile_1d(
threadpool,
&libpthreadpool::detail::call_wrapper_1d_tile_1d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range,
tile,
flags);
}
/**
* Process items on a 2D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* functor(i, j);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each item.
* @param range_i the number of items to process along the first dimension
* of the 2D grid.
* @param range_j the number of items to process along the second dimension
* of the 2D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_2d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
uint32_t flags = 0)
{
pthreadpool_parallelize_2d(
threadpool,
&libpthreadpool::detail::call_wrapper_2d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
flags);
}
/**
* Process items on a 2D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j += tile_j)
* functor(i, j, min(range_j - j, tile_j));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 2D grid.
* @param range_j the number of items to process along the second dimension
* of the 2D grid.
* @param tile_j the maximum number of items along the second dimension of
* the 2D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_2d_tile_1d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t tile_j,
uint32_t flags = 0)
{
pthreadpool_parallelize_2d_tile_1d(
threadpool,
&libpthreadpool::detail::call_wrapper_2d_tile_1d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
tile_j,
flags);
}
/**
* Process items on a 2D grid with the specified maximum tile size along each
* grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i += tile_i)
* for (size_t j = 0; j < range_j; j += tile_j)
* functor(i, j,
* min(range_i - i, tile_i), min(range_j - j, tile_j));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 2D grid.
* @param range_j the number of items to process along the second dimension
* of the 2D grid.
 * @param tile_i the maximum number of items along the first dimension of
* the 2D grid to process in one functor call.
* @param tile_j the maximum number of items along the second dimension of
* the 2D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_2d_tile_2d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t tile_i,
size_t tile_j,
uint32_t flags = 0)
{
pthreadpool_parallelize_2d_tile_2d(
threadpool,
&libpthreadpool::detail::call_wrapper_2d_tile_2d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
tile_i,
tile_j,
flags);
}
/**
* Process items on a 3D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* functor(i, j, k);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
 * @param functor the functor to call for each item.
* @param range_i the number of items to process along the first dimension
* of the 3D grid.
* @param range_j the number of items to process along the second dimension
* of the 3D grid.
* @param range_k the number of items to process along the third dimension
* of the 3D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_3d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
uint32_t flags = 0)
{
pthreadpool_parallelize_3d(
threadpool,
&libpthreadpool::detail::call_wrapper_3d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
flags);
}
/**
* Process items on a 3D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k += tile_k)
* functor(i, j, k, min(range_k - k, tile_k));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 3D grid.
* @param range_j the number of items to process along the second dimension
* of the 3D grid.
* @param range_k the number of items to process along the third dimension
* of the 3D grid.
* @param tile_k the maximum number of items along the third dimension of
* the 3D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_3d_tile_1d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_k,
uint32_t flags = 0)
{
pthreadpool_parallelize_3d_tile_1d(
threadpool,
&libpthreadpool::detail::call_wrapper_3d_tile_1d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
tile_k,
flags);
}
/**
* Process items on a 3D grid with the specified maximum tile size along the
* last two grid dimensions.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j += tile_j)
* for (size_t k = 0; k < range_k; k += tile_k)
* functor(i, j, k,
* min(range_j - j, tile_j), min(range_k - k, tile_k));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 3D grid.
* @param range_j the number of items to process along the second dimension
* of the 3D grid.
* @param range_k the number of items to process along the third dimension
* of the 3D grid.
* @param tile_j the maximum number of items along the second dimension of
* the 3D grid to process in one functor call.
* @param tile_k the maximum number of items along the third dimension of
* the 3D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_3d_tile_2d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t tile_j,
size_t tile_k,
uint32_t flags = 0)
{
pthreadpool_parallelize_3d_tile_2d(
threadpool,
&libpthreadpool::detail::call_wrapper_3d_tile_2d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
tile_j,
tile_k,
flags);
}
/**
* Process items on a 4D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* functor(i, j, k, l);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
 * @param functor the functor to call for each item.
* @param range_i the number of items to process along the first dimension
* of the 4D grid.
* @param range_j the number of items to process along the second dimension
* of the 4D grid.
* @param range_k the number of items to process along the third dimension
* of the 4D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 4D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_4d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
uint32_t flags = 0)
{
pthreadpool_parallelize_4d(
threadpool,
&libpthreadpool::detail::call_wrapper_4d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
range_l,
flags);
}
/**
* Process items on a 4D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l += tile_l)
* functor(i, j, k, l, min(range_l - l, tile_l));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 4D grid.
* @param range_j the number of items to process along the second dimension
* of the 4D grid.
* @param range_k the number of items to process along the third dimension
* of the 4D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 4D grid.
* @param tile_l the maximum number of items along the fourth dimension of
* the 4D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_4d_tile_1d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t tile_l,
uint32_t flags = 0)
{
pthreadpool_parallelize_4d_tile_1d(
threadpool,
&libpthreadpool::detail::call_wrapper_4d_tile_1d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
range_l,
tile_l,
flags);
}
/**
* Process items on a 4D grid with the specified maximum tile size along the
* last two grid dimensions.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k += tile_k)
* for (size_t l = 0; l < range_l; l += tile_l)
* functor(i, j, k, l,
* min(range_k - k, tile_k), min(range_l - l, tile_l));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 4D grid.
* @param range_j the number of items to process along the second dimension
* of the 4D grid.
* @param range_k the number of items to process along the third dimension
* of the 4D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 4D grid.
* @param tile_k the maximum number of items along the third dimension of
* the 4D grid to process in one functor call.
* @param tile_l the maximum number of items along the fourth dimension of
* the 4D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_4d_tile_2d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t tile_k,
size_t tile_l,
uint32_t flags = 0)
{
pthreadpool_parallelize_4d_tile_2d(
threadpool,
&libpthreadpool::detail::call_wrapper_4d_tile_2d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
range_l,
tile_k,
tile_l,
flags);
}
/**
* Process items on a 5D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m++)
* functor(i, j, k, l, m);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
 * @param functor the functor to call for each item.
* @param range_i the number of items to process along the first dimension
* of the 5D grid.
* @param range_j the number of items to process along the second dimension
* of the 5D grid.
* @param range_k the number of items to process along the third dimension
* of the 5D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 5D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 5D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_5d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
uint32_t flags = 0)
{
pthreadpool_parallelize_5d(
threadpool,
&libpthreadpool::detail::call_wrapper_5d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
range_l,
range_m,
flags);
}
/**
* Process items on a 5D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m += tile_m)
* functor(i, j, k, l, m, min(range_m - m, tile_m));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 5D grid.
* @param range_j the number of items to process along the second dimension
* of the 5D grid.
* @param range_k the number of items to process along the third dimension
* of the 5D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 5D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 5D grid.
* @param tile_m the maximum number of items along the fifth dimension of
* the 5D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_5d_tile_1d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t tile_m,
uint32_t flags = 0)
{
pthreadpool_parallelize_5d_tile_1d(
threadpool,
&libpthreadpool::detail::call_wrapper_5d_tile_1d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
range_l,
range_m,
tile_m,
flags);
}
/**
* Process items on a 5D grid with the specified maximum tile size along the
* last two grid dimensions.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l += tile_l)
* for (size_t m = 0; m < range_m; m += tile_m)
* functor(i, j, k, l, m,
* min(range_l - l, tile_l), min(range_m - m, tile_m));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 5D grid.
* @param range_j the number of items to process along the second dimension
* of the 5D grid.
* @param range_k the number of items to process along the third dimension
* of the 5D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 5D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 5D grid.
* @param tile_l the maximum number of items along the fourth dimension of
* the 5D grid to process in one functor call.
* @param tile_m the maximum number of items along the fifth dimension of
* the 5D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_5d_tile_2d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t tile_l,
size_t tile_m,
uint32_t flags = 0)
{
pthreadpool_parallelize_5d_tile_2d(
threadpool,
&libpthreadpool::detail::call_wrapper_5d_tile_2d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
range_l,
range_m,
tile_l,
tile_m,
flags);
}
/**
* Process items on a 6D grid.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m++)
* for (size_t n = 0; n < range_n; n++)
* functor(i, j, k, l, m, n);
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
 * @param functor the functor to call for each item.
* @param range_i the number of items to process along the first dimension
* of the 6D grid.
* @param range_j the number of items to process along the second dimension
* of the 6D grid.
* @param range_k the number of items to process along the third dimension
* of the 6D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 6D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 6D grid.
* @param range_n the number of items to process along the sixth dimension
* of the 6D grid.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_6d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t range_n,
uint32_t flags = 0)
{
pthreadpool_parallelize_6d(
threadpool,
&libpthreadpool::detail::call_wrapper_6d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
range_l,
range_m,
range_n,
flags);
}
/**
* Process items on a 6D grid with the specified maximum tile size along the
* last grid dimension.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m++)
* for (size_t n = 0; n < range_n; n += tile_n)
* functor(i, j, k, l, m, n, min(range_n - n, tile_n));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 6D grid.
* @param range_j the number of items to process along the second dimension
* of the 6D grid.
* @param range_k the number of items to process along the third dimension
* of the 6D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 6D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 6D grid.
* @param range_n the number of items to process along the sixth dimension
* of the 6D grid.
* @param tile_n the maximum number of items along the sixth dimension of
* the 6D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_6d_tile_1d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t range_n,
size_t tile_n,
uint32_t flags = 0)
{
pthreadpool_parallelize_6d_tile_1d(
threadpool,
&libpthreadpool::detail::call_wrapper_6d_tile_1d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
range_l,
range_m,
range_n,
tile_n,
flags);
}
/**
* Process items on a 6D grid with the specified maximum tile size along the
* last two grid dimensions.
*
* The function implements a parallel version of the following snippet:
*
* for (size_t i = 0; i < range_i; i++)
* for (size_t j = 0; j < range_j; j++)
* for (size_t k = 0; k < range_k; k++)
* for (size_t l = 0; l < range_l; l++)
* for (size_t m = 0; m < range_m; m += tile_m)
* for (size_t n = 0; n < range_n; n += tile_n)
* functor(i, j, k, l, m, n,
* min(range_m - m, tile_m), min(range_n - n, tile_n));
*
* When the function returns, all items have been processed and the thread pool
* is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
* @param threadpool the thread pool to use for parallelisation. If threadpool
* is NULL, all items are processed serially on the calling thread.
* @param functor the functor to call for each tile.
* @param range_i the number of items to process along the first dimension
* of the 6D grid.
* @param range_j the number of items to process along the second dimension
* of the 6D grid.
* @param range_k the number of items to process along the third dimension
* of the 6D grid.
* @param range_l the number of items to process along the fourth dimension
* of the 6D grid.
* @param range_m the number of items to process along the fifth dimension
* of the 6D grid.
* @param range_n the number of items to process along the sixth dimension
* of the 6D grid.
* @param tile_m the maximum number of items along the fifth dimension of
* the 6D grid to process in one functor call.
* @param tile_n the maximum number of items along the sixth dimension of
* the 6D grid to process in one functor call.
* @param flags a bitwise combination of zero or more optional flags
* (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
template<class T>
inline void pthreadpool_parallelize_6d_tile_2d(
pthreadpool_t threadpool,
const T& functor,
size_t range_i,
size_t range_j,
size_t range_k,
size_t range_l,
size_t range_m,
size_t range_n,
size_t tile_m,
size_t tile_n,
uint32_t flags = 0)
{
pthreadpool_parallelize_6d_tile_2d(
threadpool,
&libpthreadpool::detail::call_wrapper_6d_tile_2d<const T>,
const_cast<void*>(static_cast<const void*>(&functor)),
range_i,
range_j,
range_k,
range_l,
range_m,
range_n,
tile_m,
tile_n,
flags);
}
#endif /* __cplusplus */
#endif /* PTHREADPOOL_H_ */
```
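
The C++ overloads above let any callable, including a capturing lambda, drive the pool: the functor is smuggled through the `void*` context argument and re-invoked by the matching `call_wrapper_*` shim. A minimal sketch, assuming the header is available as `<pthreadpool.h>` and the program links against the pthreadpool library:

```cpp
#include <pthreadpool.h>

#include <cstdio>
#include <vector>

int main() {
  const size_t n = 1024;
  std::vector<float> a(n, 1.0f), b(n, 2.0f), c(n, 0.0f);

  // Passing 0 requests one worker thread per available processor core.
  pthreadpool_t pool = pthreadpool_create(0);

  // The lambda is captured by address, so it must stay alive for the whole
  // call; the flags argument defaults to 0 in the C++ overloads.
  pthreadpool_parallelize_1d(
      pool, [&](size_t i) { c[i] = a[i] + b[i]; }, n);

  pthreadpool_destroy(pool);
  std::printf("c[0] = %.1f\n", c[0]);
  return 0;
}
```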
|
=====================================================================================================================
SOURCE CODE FILE: attr.h
LINES: 1
SIZE: 24.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\attr.h
ENCODING: utf-8
```h
/*
pybind11/attr.h: Infrastructure for processing custom
type and function attributes
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include "cast.h"
#include <functional>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
/// \addtogroup annotations
/// @{
/// Annotation for methods
struct is_method {
handle class_;
explicit is_method(const handle &c) : class_(c) {}
};
/// Annotation for setters
struct is_setter {};
/// Annotation for operators
struct is_operator {};
/// Annotation for classes that cannot be subclassed
struct is_final {};
/// Annotation for parent scope
struct scope {
handle value;
explicit scope(const handle &s) : value(s) {}
};
/// Annotation for documentation
struct doc {
const char *value;
explicit doc(const char *value) : value(value) {}
};
/// Annotation for function names
struct name {
const char *value;
explicit name(const char *value) : value(value) {}
};
/// Annotation indicating that a function is an overload associated with a given "sibling"
struct sibling {
handle value;
explicit sibling(const handle &value) : value(value.ptr()) {}
};
/// Annotation indicating that a class derives from another given type
template <typename T>
struct base {
PYBIND11_DEPRECATED(
"base<T>() was deprecated in favor of specifying 'T' as a template argument to class_")
base() = default;
};
/// Keep patient alive while nurse lives
template <size_t Nurse, size_t Patient>
struct keep_alive {};
/// Annotation indicating that a class is involved in a multiple inheritance relationship
struct multiple_inheritance {};
/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class
struct dynamic_attr {};
/// Annotation which enables the buffer protocol for a type
struct buffer_protocol {};
/// Annotation which requests that a special metaclass is created for a type
struct metaclass {
handle value;
PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.")
metaclass() = default;
/// Override pybind11's default metaclass
explicit metaclass(handle value) : value(value) {}
};
/// Specifies a custom callback with signature `void (PyHeapTypeObject*)` that
/// may be used to customize the Python type.
///
/// The callback is invoked immediately before `PyType_Ready`.
///
/// Note: This is an advanced interface, and uses of it may require changes to
/// work with later versions of pybind11. You may wish to consult the
/// implementation of `make_new_python_type` in `detail/class.h` to understand
/// the context in which the callback will be run.
struct custom_type_setup {
using callback = std::function<void(PyHeapTypeObject *heap_type)>;
explicit custom_type_setup(callback value) : value(std::move(value)) {}
callback value;
};
/// Annotation that marks a class as local to the module:
struct module_local {
const bool value;
constexpr explicit module_local(bool v = true) : value(v) {}
};
/// Annotation to mark enums as an arithmetic type
struct arithmetic {};
/// Mark a function for addition at the beginning of the existing overload chain instead of the end
struct prepend {};
/** \rst
A call policy which places one or more guard variables (``Ts...``) around the function call.
For example, this definition:
.. code-block:: cpp
m.def("foo", foo, py::call_guard<T>());
is equivalent to the following pseudocode:
.. code-block:: cpp
m.def("foo", [](args...) {
T scope_guard;
return foo(args...); // forwarded arguments
});
\endrst */
template <typename... Ts>
struct call_guard;
template <>
struct call_guard<> {
using type = detail::void_type;
};
template <typename T>
struct call_guard<T> {
static_assert(std::is_default_constructible<T>::value,
"The guard type must be default constructible");
using type = T;
};
template <typename T, typename... Ts>
struct call_guard<T, Ts...> {
struct type {
T guard{}; // Compose multiple guard types with left-to-right default-constructor order
typename call_guard<Ts...>::type next{};
};
};
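/// Illustrative note: `call_guard<A, B>::type` default-constructs an `A` and
/// then a `B` immediately before the bound function runs, and destroys them
/// in reverse order afterwards, so
///
///     m.def("foo", foo, py::call_guard<A, B>());
///
/// behaves like:
///
///     m.def("foo", [](args...) {
///         A a; // constructed first, destroyed last
///         B b;
///         return foo(args...);
///     });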
/// @} annotations
PYBIND11_NAMESPACE_BEGIN(detail)
/* Forward declarations */
enum op_id : int;
enum op_type : int;
struct undefined_t;
template <op_id id, op_type ot, typename L = undefined_t, typename R = undefined_t>
struct op_;
void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret);
/// Internal data structure which holds metadata about a keyword argument
struct argument_record {
const char *name; ///< Argument name
const char *descr; ///< Human-readable version of the argument value
handle value; ///< Associated Python object
bool convert : 1; ///< True if the argument is allowed to convert when loading
bool none : 1; ///< True if None is allowed when loading
argument_record(const char *name, const char *descr, handle value, bool convert, bool none)
: name(name), descr(descr), value(value), convert(convert), none(none) {}
};
/// Internal data structure which holds metadata about a bound function (signature, overloads,
/// etc.)
struct function_record {
function_record()
: is_constructor(false), is_new_style_constructor(false), is_stateless(false),
is_operator(false), is_method(false), is_setter(false), has_args(false),
has_kwargs(false), prepend(false) {}
/// Function name
    char *name = nullptr; /* why no C++ strings? They generate heavier code... */
    /// User-specified documentation string
char *doc = nullptr;
/// Human-readable version of the function signature
char *signature = nullptr;
/// List of registered keyword arguments
std::vector<argument_record> args;
/// Pointer to lambda function which converts arguments and performs the actual call
handle (*impl)(function_call &) = nullptr;
/// Storage for the wrapped function pointer and captured data, if any
void *data[3] = {};
/// Pointer to custom destructor for 'data' (if needed)
void (*free_data)(function_record *ptr) = nullptr;
/// Return value policy associated with this function
return_value_policy policy = return_value_policy::automatic;
/// True if name == '__init__'
bool is_constructor : 1;
/// True if this is a new-style `__init__` defined in `detail/init.h`
bool is_new_style_constructor : 1;
/// True if this is a stateless function pointer
bool is_stateless : 1;
/// True if this is an operator (__add__), etc.
bool is_operator : 1;
/// True if this is a method
bool is_method : 1;
/// True if this is a setter
bool is_setter : 1;
/// True if the function has a '*args' argument
bool has_args : 1;
/// True if the function has a '**kwargs' argument
bool has_kwargs : 1;
/// True if this function is to be inserted at the beginning of the overload resolution chain
bool prepend : 1;
/// Number of arguments (including py::args and/or py::kwargs, if present)
std::uint16_t nargs;
/// Number of leading positional arguments, which are terminated by a py::args or py::kwargs
/// argument or by a py::kw_only annotation.
std::uint16_t nargs_pos = 0;
/// Number of leading arguments (counted in `nargs`) that are positional-only
std::uint16_t nargs_pos_only = 0;
/// Python method object
PyMethodDef *def = nullptr;
/// Python handle to the parent scope (a class or a module)
handle scope;
/// Python handle to the sibling function representing an overload chain
handle sibling;
/// Pointer to next overload
function_record *next = nullptr;
};
/// Special data structure which (temporarily) holds metadata about a bound class
struct type_record {
PYBIND11_NOINLINE type_record()
: multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false),
default_holder(true), module_local(false), is_final(false) {}
/// Handle to the parent scope
handle scope;
/// Name of the class
const char *name = nullptr;
    /// Pointer to RTTI type_info data structure
const std::type_info *type = nullptr;
/// How large is the underlying C++ type?
size_t type_size = 0;
/// What is the alignment of the underlying C++ type?
size_t type_align = 0;
/// How large is the type's holder?
size_t holder_size = 0;
/// The global operator new can be overridden with a class-specific variant
void *(*operator_new)(size_t) = nullptr;
/// Function pointer to class_<..>::init_instance
void (*init_instance)(instance *, const void *) = nullptr;
/// Function pointer to class_<..>::dealloc
void (*dealloc)(detail::value_and_holder &) = nullptr;
/// List of base classes of the newly created type
list bases;
/// Optional docstring
const char *doc = nullptr;
/// Custom metaclass (optional)
handle metaclass;
/// Custom type setup.
custom_type_setup::callback custom_type_setup_callback;
/// Multiple inheritance marker
bool multiple_inheritance : 1;
/// Does the class manage a __dict__?
bool dynamic_attr : 1;
/// Does the class implement the buffer protocol?
bool buffer_protocol : 1;
/// Is the default (unique_ptr) holder type used?
bool default_holder : 1;
/// Is the class definition local to the module shared object?
bool module_local : 1;
    /// Is the class final, i.e. not subclassable from Python?
bool is_final : 1;
PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *) ) {
auto *base_info = detail::get_type_info(base, false);
if (!base_info) {
std::string tname(base.name());
detail::clean_type_id(tname);
pybind11_fail("generic_type: type \"" + std::string(name)
+ "\" referenced unknown base type \"" + tname + "\"");
}
if (default_holder != base_info->default_holder) {
std::string tname(base.name());
detail::clean_type_id(tname);
pybind11_fail("generic_type: type \"" + std::string(name) + "\" "
+ (default_holder ? "does not have" : "has")
+ " a non-default holder type while its base \"" + tname + "\" "
+ (base_info->default_holder ? "does not" : "does"));
}
bases.append((PyObject *) base_info->type);
#if PY_VERSION_HEX < 0x030B0000
dynamic_attr |= base_info->type->tp_dictoffset != 0;
#else
dynamic_attr |= (base_info->type->tp_flags & Py_TPFLAGS_MANAGED_DICT) != 0;
#endif
if (caster) {
base_info->implicit_casts.emplace_back(type, caster);
}
}
};
inline function_call::function_call(const function_record &f, handle p) : func(f), parent(p) {
args.reserve(f.nargs);
args_convert.reserve(f.nargs);
}
/// Tag for a new-style `__init__` defined in `detail/init.h`
struct is_new_style_constructor {};
/**
* Partial template specializations to process custom attributes provided to
* cpp_function_ and class_. These are either used to initialize the respective
* fields in the type_record and function_record data structures or executed at
* runtime to deal with custom call policies (e.g. keep_alive).
*/
template <typename T, typename SFINAE = void>
struct process_attribute;
template <typename T>
struct process_attribute_default {
/// Default implementation: do nothing
static void init(const T &, function_record *) {}
static void init(const T &, type_record *) {}
static void precall(function_call &) {}
static void postcall(function_call &, handle) {}
};
/// Process an attribute specifying the function's name
template <>
struct process_attribute<name> : process_attribute_default<name> {
static void init(const name &n, function_record *r) { r->name = const_cast<char *>(n.value); }
};
/// Process an attribute specifying the function's docstring
template <>
struct process_attribute<doc> : process_attribute_default<doc> {
static void init(const doc &n, function_record *r) { r->doc = const_cast<char *>(n.value); }
};
/// Process an attribute specifying the function's docstring (provided as a C-style string)
template <>
struct process_attribute<const char *> : process_attribute_default<const char *> {
static void init(const char *d, function_record *r) { r->doc = const_cast<char *>(d); }
static void init(const char *d, type_record *r) { r->doc = d; }
};
template <>
struct process_attribute<char *> : process_attribute<const char *> {};
/// Process an attribute indicating the function's return value policy
template <>
struct process_attribute<return_value_policy> : process_attribute_default<return_value_policy> {
static void init(const return_value_policy &p, function_record *r) { r->policy = p; }
};
/// Process an attribute which indicates that this is an overloaded function associated with a
/// given sibling
template <>
struct process_attribute<sibling> : process_attribute_default<sibling> {
static void init(const sibling &s, function_record *r) { r->sibling = s.value; }
};
/// Process an attribute which indicates that this function is a method
template <>
struct process_attribute<is_method> : process_attribute_default<is_method> {
static void init(const is_method &s, function_record *r) {
r->is_method = true;
r->scope = s.class_;
}
};
/// Process an attribute which indicates that this function is a setter
template <>
struct process_attribute<is_setter> : process_attribute_default<is_setter> {
static void init(const is_setter &, function_record *r) { r->is_setter = true; }
};
/// Process an attribute which indicates the parent scope of a method
template <>
struct process_attribute<scope> : process_attribute_default<scope> {
static void init(const scope &s, function_record *r) { r->scope = s.value; }
};
/// Process an attribute which indicates that this function is an operator
template <>
struct process_attribute<is_operator> : process_attribute_default<is_operator> {
static void init(const is_operator &, function_record *r) { r->is_operator = true; }
};
template <>
struct process_attribute<is_new_style_constructor>
: process_attribute_default<is_new_style_constructor> {
static void init(const is_new_style_constructor &, function_record *r) {
r->is_new_style_constructor = true;
}
};
inline void check_kw_only_arg(const arg &a, function_record *r) {
if (r->args.size() > r->nargs_pos && (!a.name || a.name[0] == '\0')) {
pybind11_fail("arg(): cannot specify an unnamed argument after a kw_only() annotation or "
"args() argument");
}
}
inline void append_self_arg_if_needed(function_record *r) {
if (r->is_method && r->args.empty()) {
r->args.emplace_back("self", nullptr, handle(), /*convert=*/true, /*none=*/false);
}
}
/// Process a keyword argument attribute (*without* a default value)
template <>
struct process_attribute<arg> : process_attribute_default<arg> {
static void init(const arg &a, function_record *r) {
append_self_arg_if_needed(r);
r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none);
check_kw_only_arg(a, r);
}
};
/// Process a keyword argument attribute (*with* a default value)
template <>
struct process_attribute<arg_v> : process_attribute_default<arg_v> {
static void init(const arg_v &a, function_record *r) {
if (r->is_method && r->args.empty()) {
r->args.emplace_back(
"self", /*descr=*/nullptr, /*parent=*/handle(), /*convert=*/true, /*none=*/false);
}
if (!a.value) {
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
std::string descr("'");
if (a.name) {
descr += std::string(a.name) + ": ";
}
descr += a.type + "'";
if (r->is_method) {
if (r->name) {
descr += " in method '" + (std::string) str(r->scope) + "."
+ (std::string) r->name + "'";
} else {
descr += " in method of '" + (std::string) str(r->scope) + "'";
}
} else if (r->name) {
descr += " in function '" + (std::string) r->name + "'";
}
pybind11_fail("arg(): could not convert default argument " + descr
+ " into a Python object (type not registered yet?)");
#else
pybind11_fail("arg(): could not convert default argument "
"into a Python object (type not registered yet?). "
"#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for "
"more information.");
#endif
}
r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none);
check_kw_only_arg(a, r);
}
};
/// Process a keyword-only-arguments-follow pseudo argument
template <>
struct process_attribute<kw_only> : process_attribute_default<kw_only> {
static void init(const kw_only &, function_record *r) {
append_self_arg_if_needed(r);
if (r->has_args && r->nargs_pos != static_cast<std::uint16_t>(r->args.size())) {
pybind11_fail("Mismatched args() and kw_only(): they must occur at the same relative "
"argument location (or omit kw_only() entirely)");
}
r->nargs_pos = static_cast<std::uint16_t>(r->args.size());
}
};
/// Process a positional-only-argument maker
template <>
struct process_attribute<pos_only> : process_attribute_default<pos_only> {
static void init(const pos_only &, function_record *r) {
append_self_arg_if_needed(r);
r->nargs_pos_only = static_cast<std::uint16_t>(r->args.size());
if (r->nargs_pos_only > r->nargs_pos) {
pybind11_fail("pos_only(): cannot follow a py::args() argument");
}
// It also can't follow a kw_only, but a static_assert in pybind11.h checks that
}
};
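/// Illustrative note: in a definition such as
///
///     m.def("f", &f, py::arg("a"), py::pos_only(), py::kw_only(), py::arg("b"));
///
/// pos_only() sets nargs_pos_only to 1 (so `a` is positional-only) and
/// kw_only() caps nargs_pos at the same point (so `b` is keyword-only),
/// giving the Python signature f(a, /, *, b).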
/// Process a parent class attribute. Single inheritance only (class_ itself already guarantees
/// that)
template <typename T>
struct process_attribute<T, enable_if_t<is_pyobject<T>::value>>
: process_attribute_default<handle> {
static void init(const handle &h, type_record *r) { r->bases.append(h); }
};
/// Process a parent class attribute (deprecated, does not support multiple inheritance)
template <typename T>
struct process_attribute<base<T>> : process_attribute_default<base<T>> {
static void init(const base<T> &, type_record *r) { r->add_base(typeid(T), nullptr); }
};
/// Process a multiple inheritance attribute
template <>
struct process_attribute<multiple_inheritance> : process_attribute_default<multiple_inheritance> {
static void init(const multiple_inheritance &, type_record *r) {
r->multiple_inheritance = true;
}
};
template <>
struct process_attribute<dynamic_attr> : process_attribute_default<dynamic_attr> {
static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; }
};
template <>
struct process_attribute<custom_type_setup> {
static void init(const custom_type_setup &value, type_record *r) {
r->custom_type_setup_callback = value.value;
}
};
template <>
struct process_attribute<is_final> : process_attribute_default<is_final> {
static void init(const is_final &, type_record *r) { r->is_final = true; }
};
template <>
struct process_attribute<buffer_protocol> : process_attribute_default<buffer_protocol> {
static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; }
};
template <>
struct process_attribute<metaclass> : process_attribute_default<metaclass> {
static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; }
};
template <>
struct process_attribute<module_local> : process_attribute_default<module_local> {
static void init(const module_local &l, type_record *r) { r->module_local = l.value; }
};
/// Process a 'prepend' attribute, putting this at the beginning of the overload chain
template <>
struct process_attribute<prepend> : process_attribute_default<prepend> {
static void init(const prepend &, function_record *r) { r->prepend = true; }
};
/// Process an 'arithmetic' attribute for enums (does nothing here)
template <>
struct process_attribute<arithmetic> : process_attribute_default<arithmetic> {};
template <typename... Ts>
struct process_attribute<call_guard<Ts...>> : process_attribute_default<call_guard<Ts...>> {};
/**
* Process a keep_alive call policy -- invokes keep_alive_impl during the
 * pre-call handler if both Nurse and Patient are nonzero, and during the
 * post-call handler otherwise
*/
template <size_t Nurse, size_t Patient>
struct process_attribute<keep_alive<Nurse, Patient>>
: public process_attribute_default<keep_alive<Nurse, Patient>> {
template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
static void precall(function_call &call) {
keep_alive_impl(Nurse, Patient, call, handle());
}
template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
static void postcall(function_call &, handle) {}
template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
static void precall(function_call &) {}
template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
static void postcall(function_call &call, handle ret) {
keep_alive_impl(Nurse, Patient, call, ret);
}
};
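/// Illustrative note: `py::keep_alive<1, 2>()` on a bound method ties argument
/// 2 (the patient) to argument 1 (`self`, the nurse), so the patient cannot be
/// garbage-collected before the nurse; because both indices are nonzero, the
/// policy runs in the pre-call handler above.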
/// Recursively iterate over variadic template arguments
template <typename... Args>
struct process_attributes {
static void init(const Args &...args, function_record *r) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r);
PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r);
using expander = int[];
(void) expander{
0, ((void) process_attribute<typename std::decay<Args>::type>::init(args, r), 0)...};
}
static void init(const Args &...args, type_record *r) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r);
PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r);
using expander = int[];
(void) expander{0,
(process_attribute<typename std::decay<Args>::type>::init(args, r), 0)...};
}
static void precall(function_call &call) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call);
using expander = int[];
(void) expander{0,
(process_attribute<typename std::decay<Args>::type>::precall(call), 0)...};
}
static void postcall(function_call &call, handle fn_ret) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call, fn_ret);
PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(fn_ret);
using expander = int[];
(void) expander{
0, (process_attribute<typename std::decay<Args>::type>::postcall(call, fn_ret), 0)...};
}
};
template <typename T>
using is_call_guard = is_instantiation<call_guard, T>;
/// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found)
template <typename... Extra>
using extract_guard_t = typename exactly_one_t<is_call_guard, call_guard<>, Extra...>::type;
/// Check the number of named arguments at compile time
template <typename... Extra,
size_t named = constexpr_sum(std::is_base_of<arg, Extra>::value...),
size_t self = constexpr_sum(std::is_same<is_method, Extra>::value...)>
constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(nargs, has_args, has_kwargs);
return named == 0 || (self + named + size_t(has_args) + size_t(has_kwargs)) == nargs;
}
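// Worked example (illustrative, not part of the original header): for
//
//     m.def("f", [](int x, int y) { return x + y; },
//           py::arg("x"), py::arg("y"));
//
// `named` is 2 and `self` is 0, and with no *args/**kwargs present the check
// requires nargs == 2. When no py::arg annotations are given at all, `named`
// is 0 and any argument count passes.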
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
============================================================================================================================
SOURCE CODE FILE: buffer_info.h
LINES: 1
SIZE: 7.80 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\buffer_info.h
ENCODING: utf-8
```h
/*
pybind11/buffer_info.h: Python buffer object interface
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Default, C-style strides
inline std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
auto ndim = shape.size();
std::vector<ssize_t> strides(ndim, itemsize);
if (ndim > 0) {
for (size_t i = ndim - 1; i > 0; --i) {
strides[i - 1] = strides[i] * shape[i];
}
}
return strides;
}
// F-style strides; default when constructing an array_t with `ExtraFlags & f_style`
inline std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
auto ndim = shape.size();
std::vector<ssize_t> strides(ndim, itemsize);
for (size_t i = 1; i < ndim; ++i) {
strides[i] = strides[i - 1] * shape[i - 1];
}
return strides;
}
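// Worked example (illustrative, not part of the original header): for
// shape = {2, 3, 4} and itemsize = 8,
//
//     c_strides -> {96, 32, 8}   // row-major: last index varies fastest
//     f_strides -> {8, 16, 48}   // column-major: first index varies fastest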
template <typename T, typename SFINAE = void>
struct compare_buffer_info;
PYBIND11_NAMESPACE_END(detail)
/// Information record describing a Python buffer object
struct buffer_info {
void *ptr = nullptr; // Pointer to the underlying storage
ssize_t itemsize = 0; // Size of individual items in bytes
ssize_t size = 0; // Total number of entries
std::string format; // For homogeneous buffers, this should be set to
// format_descriptor<T>::format()
ssize_t ndim = 0; // Number of dimensions
std::vector<ssize_t> shape; // Shape of the tensor (1 entry per dimension)
std::vector<ssize_t> strides; // Number of bytes between adjacent entries
                                  // (one stride per dimension)
bool readonly = false; // flag to indicate if the underlying storage may be written to
buffer_info() = default;
buffer_info(void *ptr,
ssize_t itemsize,
const std::string &format,
ssize_t ndim,
detail::any_container<ssize_t> shape_in,
detail::any_container<ssize_t> strides_in,
bool readonly = false)
: ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim),
shape(std::move(shape_in)), strides(std::move(strides_in)), readonly(readonly) {
if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size()) {
pybind11_fail("buffer_info: ndim doesn't match shape and/or strides length");
}
for (size_t i = 0; i < (size_t) ndim; ++i) {
size *= shape[i];
}
}
template <typename T>
buffer_info(T *ptr,
detail::any_container<ssize_t> shape_in,
detail::any_container<ssize_t> strides_in,
bool readonly = false)
: buffer_info(private_ctr_tag(),
ptr,
sizeof(T),
format_descriptor<T>::format(),
static_cast<ssize_t>(shape_in->size()),
std::move(shape_in),
std::move(strides_in),
readonly) {}
buffer_info(void *ptr,
ssize_t itemsize,
const std::string &format,
ssize_t size,
bool readonly = false)
: buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}, readonly) {}
template <typename T>
buffer_info(T *ptr, ssize_t size, bool readonly = false)
: buffer_info(ptr, sizeof(T), format_descriptor<T>::format(), size, readonly) {}
template <typename T>
buffer_info(const T *ptr, ssize_t size, bool readonly = true)
: buffer_info(
const_cast<T *>(ptr), sizeof(T), format_descriptor<T>::format(), size, readonly) {}
explicit buffer_info(Py_buffer *view, bool ownview = true)
: buffer_info(
view->buf,
view->itemsize,
view->format,
view->ndim,
{view->shape, view->shape + view->ndim},
/* Though buffer::request() requests PyBUF_STRIDES, ctypes objects
* ignore this flag and return a view with NULL strides.
* When strides are NULL, build them manually. */
view->strides
? std::vector<ssize_t>(view->strides, view->strides + view->ndim)
: detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize),
(view->readonly != 0)) {
// NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
this->m_view = view;
// NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
this->ownview = ownview;
}
buffer_info(const buffer_info &) = delete;
buffer_info &operator=(const buffer_info &) = delete;
buffer_info(buffer_info &&other) noexcept { (*this) = std::move(other); }
buffer_info &operator=(buffer_info &&rhs) noexcept {
ptr = rhs.ptr;
itemsize = rhs.itemsize;
size = rhs.size;
format = std::move(rhs.format);
ndim = rhs.ndim;
shape = std::move(rhs.shape);
strides = std::move(rhs.strides);
std::swap(m_view, rhs.m_view);
std::swap(ownview, rhs.ownview);
readonly = rhs.readonly;
return *this;
}
~buffer_info() {
if (m_view && ownview) {
PyBuffer_Release(m_view);
delete m_view;
}
}
Py_buffer *view() const { return m_view; }
Py_buffer *&view() { return m_view; }
/* True if the buffer item type is equivalent to `T`. */
// To define "equivalent" by example:
// `buffer_info::item_type_is_equivalent_to<int>(b)` and
// `buffer_info::item_type_is_equivalent_to<long>(b)` may both be true
// on some platforms, but `int` and `unsigned` will never be equivalent.
// For the ground truth, please inspect `detail::compare_buffer_info<>`.
template <typename T>
bool item_type_is_equivalent_to() const {
return detail::compare_buffer_info<T>::compare(*this);
}
private:
struct private_ctr_tag {};
buffer_info(private_ctr_tag,
void *ptr,
ssize_t itemsize,
const std::string &format,
ssize_t ndim,
detail::any_container<ssize_t> &&shape_in,
detail::any_container<ssize_t> &&strides_in,
bool readonly)
: buffer_info(
ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) {}
Py_buffer *m_view = nullptr;
bool ownview = false;
};
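// Illustrative sketch (not part of the original header): a typical way to
// produce a buffer_info when implementing the buffer protocol for a
// hypothetical row-major `Matrix` class holding floats (adapted from the
// pybind11 documentation):
//
//     py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
//         .def_buffer([](Matrix &mat) -> py::buffer_info {
//             return py::buffer_info(
//                 mat.data(),                              // pointer to storage
//                 sizeof(float),                           // itemsize
//                 py::format_descriptor<float>::format(),  // format string
//                 2,                                       // ndim
//                 {mat.rows(), mat.cols()},                // shape
//                 {sizeof(float) * mat.cols(),             // strides in bytes
//                  sizeof(float)});
//         });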
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename T, typename SFINAE>
struct compare_buffer_info {
static bool compare(const buffer_info &b) {
// NOLINTNEXTLINE(bugprone-sizeof-expression) Needed for `PyObject *`
return b.format == format_descriptor<T>::format() && b.itemsize == (ssize_t) sizeof(T);
}
};
template <typename T>
struct compare_buffer_info<T, detail::enable_if_t<std::is_integral<T>::value>> {
static bool compare(const buffer_info &b) {
return (size_t) b.itemsize == sizeof(T)
&& (b.format == format_descriptor<T>::value
|| ((sizeof(T) == sizeof(long))
&& b.format == (std::is_unsigned<T>::value ? "L" : "l"))
|| ((sizeof(T) == sizeof(size_t))
&& b.format == (std::is_unsigned<T>::value ? "N" : "n")));
}
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
=====================================================================================================================
SOURCE CODE FILE: cast.h
LINES: 1
SIZE: 71.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\cast.h
ENCODING: utf-8
```h
/*
pybind11/cast.h: Partial template specializations to cast between
C++ and Python types
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include "detail/descr.h"
#include "detail/type_caster_base.h"
#include "detail/typeid.h"
#include "pytypes.h"
#include <array>
#include <cstring>
#include <functional>
#include <iosfwd>
#include <iterator>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename type, typename SFINAE = void>
class type_caster : public type_caster_base<type> {};
template <typename type>
using make_caster = type_caster<intrinsic_t<type>>;
// Shortcut for calling a caster's `cast_op_type` cast operator for casting a type_caster to a T
template <typename T>
typename make_caster<T>::template cast_op_type<T> cast_op(make_caster<T> &caster) {
using result_t = typename make_caster<T>::template cast_op_type<T>; // See PR #4893
return caster.operator result_t();
}
template <typename T>
typename make_caster<T>::template cast_op_type<typename std::add_rvalue_reference<T>::type>
cast_op(make_caster<T> &&caster) {
using result_t = typename make_caster<T>::template cast_op_type<
typename std::add_rvalue_reference<T>::type>; // See PR #4893
return std::move(caster).operator result_t();
}
template <typename type>
class type_caster<std::reference_wrapper<type>> {
private:
using caster_t = make_caster<type>;
caster_t subcaster;
using reference_t = type &;
using subcaster_cast_op_type = typename caster_t::template cast_op_type<reference_t>;
static_assert(
std::is_same<typename std::remove_const<type>::type &, subcaster_cast_op_type>::value
|| std::is_same<reference_t, subcaster_cast_op_type>::value,
"std::reference_wrapper<T> caster requires T to have a caster with an "
"`operator T &()` or `operator const T &()`");
public:
bool load(handle src, bool convert) { return subcaster.load(src, convert); }
static constexpr auto name = caster_t::name;
static handle
cast(const std::reference_wrapper<type> &src, return_value_policy policy, handle parent) {
// It is definitely wrong to take ownership of this pointer, so mask that rvp
if (policy == return_value_policy::take_ownership
|| policy == return_value_policy::automatic) {
policy = return_value_policy::automatic_reference;
}
return caster_t::cast(&src.get(), policy, parent);
}
template <typename T>
using cast_op_type = std::reference_wrapper<type>;
explicit operator std::reference_wrapper<type>() { return cast_op<type &>(subcaster); }
};
#define PYBIND11_TYPE_CASTER(type, py_name) \
protected: \
type value; \
\
public: \
static constexpr auto name = py_name; \
template <typename T_, \
::pybind11::detail::enable_if_t< \
std::is_same<type, ::pybind11::detail::remove_cv_t<T_>>::value, \
int> \
= 0> \
static ::pybind11::handle cast( \
T_ *src, ::pybind11::return_value_policy policy, ::pybind11::handle parent) { \
if (!src) \
return ::pybind11::none().release(); \
if (policy == ::pybind11::return_value_policy::take_ownership) { \
auto h = cast(std::move(*src), policy, parent); \
delete src; \
return h; \
} \
return cast(*src, policy, parent); \
} \
operator type *() { return &value; } /* NOLINT(bugprone-macro-parentheses) */ \
operator type &() { return value; } /* NOLINT(bugprone-macro-parentheses) */ \
operator type &&() && { return std::move(value); } /* NOLINT(bugprone-macro-parentheses) */ \
template <typename T_> \
using cast_op_type = ::pybind11::detail::movable_cast_op_type<T_>
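// Illustrative sketch (not part of the original header): a minimal custom
// caster built on PYBIND11_TYPE_CASTER, adapted from the pybind11
// documentation and placed in namespace pybind11::detail. `inty` is a
// hypothetical struct with a `long long_value` member:
//
//     template <>
//     struct type_caster<inty> {
//         PYBIND11_TYPE_CASTER(inty, const_name("inty"));
//         // Python -> C++: fill `value` (declared by the macro) and report
//         // success.
//         bool load(handle src, bool /*convert*/) {
//             PyObject *tmp = PyNumber_Long(src.ptr());
//             if (!tmp) {
//                 PyErr_Clear();
//                 return false;
//             }
//             value.long_value = PyLong_AsLong(tmp);
//             Py_DECREF(tmp);
//             if (value.long_value == -1 && PyErr_Occurred()) {
//                 PyErr_Clear();
//                 return false;
//             }
//             return true;
//         }
//         // C++ -> Python:
//         static handle cast(inty src, return_value_policy, handle) {
//             return PyLong_FromLong(src.long_value);
//         }
//     };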
template <typename CharT>
using is_std_char_type = any_of<std::is_same<CharT, char>, /* std::string */
#if defined(PYBIND11_HAS_U8STRING)
std::is_same<CharT, char8_t>, /* std::u8string */
#endif
std::is_same<CharT, char16_t>, /* std::u16string */
std::is_same<CharT, char32_t>, /* std::u32string */
std::is_same<CharT, wchar_t> /* std::wstring */
>;
template <typename T>
struct type_caster<T, enable_if_t<std::is_arithmetic<T>::value && !is_std_char_type<T>::value>> {
using _py_type_0 = conditional_t<sizeof(T) <= sizeof(long), long, long long>;
using _py_type_1 = conditional_t<std::is_signed<T>::value,
_py_type_0,
typename std::make_unsigned<_py_type_0>::type>;
using py_type = conditional_t<std::is_floating_point<T>::value, double, _py_type_1>;
public:
bool load(handle src, bool convert) {
py_type py_value;
if (!src) {
return false;
}
#if !defined(PYPY_VERSION)
auto index_check = [](PyObject *o) { return PyIndex_Check(o); };
#else
// In PyPy 7.3.3, `PyIndex_Check` is implemented by calling `__index__`,
// while CPython only considers the existence of `nb_index`/`__index__`.
auto index_check = [](PyObject *o) { return hasattr(o, "__index__"); };
#endif
if (std::is_floating_point<T>::value) {
if (convert || PyFloat_Check(src.ptr())) {
py_value = (py_type) PyFloat_AsDouble(src.ptr());
} else {
return false;
}
} else if (PyFloat_Check(src.ptr())
|| (!convert && !PYBIND11_LONG_CHECK(src.ptr()) && !index_check(src.ptr()))) {
return false;
} else {
handle src_or_index = src;
// PyPy: 7.3.7's 3.8 does not implement PyLong_*'s __index__ calls.
#if PY_VERSION_HEX < 0x03080000 || defined(PYPY_VERSION)
object index;
if (!PYBIND11_LONG_CHECK(src.ptr())) { // So: index_check(src.ptr())
index = reinterpret_steal<object>(PyNumber_Index(src.ptr()));
if (!index) {
PyErr_Clear();
if (!convert)
return false;
} else {
src_or_index = index;
}
}
#endif
if (std::is_unsigned<py_type>::value) {
py_value = as_unsigned<py_type>(src_or_index.ptr());
} else { // signed integer:
py_value = sizeof(T) <= sizeof(long)
? (py_type) PyLong_AsLong(src_or_index.ptr())
: (py_type) PYBIND11_LONG_AS_LONGLONG(src_or_index.ptr());
}
}
// Python API reported an error
bool py_err = py_value == (py_type) -1 && PyErr_Occurred();
// Check to see if the conversion is valid (integers should match exactly)
// Signed/unsigned checks happen elsewhere
if (py_err
|| (std::is_integral<T>::value && sizeof(py_type) != sizeof(T)
&& py_value != (py_type) (T) py_value)) {
PyErr_Clear();
if (py_err && convert && (PyNumber_Check(src.ptr()) != 0)) {
auto tmp = reinterpret_steal<object>(std::is_floating_point<T>::value
? PyNumber_Float(src.ptr())
: PyNumber_Long(src.ptr()));
PyErr_Clear();
return load(tmp, false);
}
return false;
}
value = (T) py_value;
return true;
}
template <typename U = T>
static typename std::enable_if<std::is_floating_point<U>::value, handle>::type
cast(U src, return_value_policy /* policy */, handle /* parent */) {
return PyFloat_FromDouble((double) src);
}
template <typename U = T>
static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value
&& (sizeof(U) <= sizeof(long)),
handle>::type
cast(U src, return_value_policy /* policy */, handle /* parent */) {
return PYBIND11_LONG_FROM_SIGNED((long) src);
}
template <typename U = T>
static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value
&& (sizeof(U) <= sizeof(unsigned long)),
handle>::type
cast(U src, return_value_policy /* policy */, handle /* parent */) {
return PYBIND11_LONG_FROM_UNSIGNED((unsigned long) src);
}
template <typename U = T>
static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value
&& (sizeof(U) > sizeof(long)),
handle>::type
cast(U src, return_value_policy /* policy */, handle /* parent */) {
return PyLong_FromLongLong((long long) src);
}
template <typename U = T>
static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value
&& (sizeof(U) > sizeof(unsigned long)),
handle>::type
cast(U src, return_value_policy /* policy */, handle /* parent */) {
return PyLong_FromUnsignedLongLong((unsigned long long) src);
}
PYBIND11_TYPE_CASTER(T, const_name<std::is_integral<T>::value>("int", "float"));
};
template <typename T>
struct void_caster {
public:
bool load(handle src, bool) {
if (src && src.is_none()) {
return true;
}
return false;
}
static handle cast(T, return_value_policy /* policy */, handle /* parent */) {
return none().release();
}
PYBIND11_TYPE_CASTER(T, const_name("None"));
};
template <>
class type_caster<void_type> : public void_caster<void_type> {};
template <>
class type_caster<void> : public type_caster<void_type> {
public:
using type_caster<void_type>::cast;
bool load(handle h, bool) {
if (!h) {
return false;
}
if (h.is_none()) {
value = nullptr;
return true;
}
/* Check if this is a capsule */
if (isinstance<capsule>(h)) {
value = reinterpret_borrow<capsule>(h);
return true;
}
/* Check if this is a C++ type */
const auto &bases = all_type_info((PyTypeObject *) type::handle_of(h).ptr());
        if (bases.size() == 1) { // Only allow loading from a single-value type
value = values_and_holders(reinterpret_cast<instance *>(h.ptr())).begin()->value_ptr();
return true;
}
/* Fail */
return false;
}
static handle cast(const void *ptr, return_value_policy /* policy */, handle /* parent */) {
if (ptr) {
return capsule(ptr).release();
}
return none().release();
}
template <typename T>
using cast_op_type = void *&;
explicit operator void *&() { return value; }
static constexpr auto name = const_name("capsule");
private:
void *value = nullptr;
};
template <>
class type_caster<std::nullptr_t> : public void_caster<std::nullptr_t> {};
template <>
class type_caster<bool> {
public:
bool load(handle src, bool convert) {
if (!src) {
return false;
}
if (src.ptr() == Py_True) {
value = true;
return true;
}
if (src.ptr() == Py_False) {
value = false;
return true;
}
if (convert || is_numpy_bool(src)) {
            // (also allow non-implicit conversion for numpy booleans); see
            // is_numpy_bool() below, which compares against both the NumPy 2
            // name `numpy.bool` and the NumPy 1.x name `numpy.bool_`.
Py_ssize_t res = -1;
if (src.is_none()) {
res = 0; // None is implicitly converted to False
}
#if defined(PYPY_VERSION)
// On PyPy, check that "__bool__" attr exists
else if (hasattr(src, PYBIND11_BOOL_ATTR)) {
res = PyObject_IsTrue(src.ptr());
}
#else
// Alternate approach for CPython: this does the same as the above, but optimized
// using the CPython API so as to avoid an unneeded attribute lookup.
else if (auto *tp_as_number = src.ptr()->ob_type->tp_as_number) {
if (PYBIND11_NB_BOOL(tp_as_number)) {
res = (*PYBIND11_NB_BOOL(tp_as_number))(src.ptr());
}
}
#endif
if (res == 0 || res == 1) {
value = (res != 0);
return true;
}
PyErr_Clear();
}
return false;
}
static handle cast(bool src, return_value_policy /* policy */, handle /* parent */) {
return handle(src ? Py_True : Py_False).inc_ref();
}
PYBIND11_TYPE_CASTER(bool, const_name("bool"));
private:
// Test if an object is a NumPy boolean (without fetching the type).
static inline bool is_numpy_bool(handle object) {
const char *type_name = Py_TYPE(object.ptr())->tp_name;
// Name changed to `numpy.bool` in NumPy 2, `numpy.bool_` is needed for 1.x support
return std::strcmp("numpy.bool", type_name) == 0
|| std::strcmp("numpy.bool_", type_name) == 0;
}
};
// Helper class for UTF-{8,16,32} C++ stl strings:
template <typename StringType, bool IsView = false>
struct string_caster {
using CharT = typename StringType::value_type;
// Simplify life by being able to assume standard char sizes (the standard only guarantees
// minimums, but Python requires exact sizes)
static_assert(!std::is_same<CharT, char>::value || sizeof(CharT) == 1,
"Unsupported char size != 1");
#if defined(PYBIND11_HAS_U8STRING)
static_assert(!std::is_same<CharT, char8_t>::value || sizeof(CharT) == 1,
"Unsupported char8_t size != 1");
#endif
static_assert(!std::is_same<CharT, char16_t>::value || sizeof(CharT) == 2,
"Unsupported char16_t size != 2");
static_assert(!std::is_same<CharT, char32_t>::value || sizeof(CharT) == 4,
"Unsupported char32_t size != 4");
// wchar_t can be either 16 bits (Windows) or 32 (everywhere else)
static_assert(!std::is_same<CharT, wchar_t>::value || sizeof(CharT) == 2 || sizeof(CharT) == 4,
"Unsupported wchar_t size != 2/4");
static constexpr size_t UTF_N = 8 * sizeof(CharT);
bool load(handle src, bool) {
handle load_src = src;
if (!src) {
return false;
}
if (!PyUnicode_Check(load_src.ptr())) {
return load_raw(load_src);
}
// For UTF-8 we avoid the need for a temporary `bytes` object by using
// `PyUnicode_AsUTF8AndSize`.
if (UTF_N == 8) {
Py_ssize_t size = -1;
const auto *buffer
= reinterpret_cast<const CharT *>(PyUnicode_AsUTF8AndSize(load_src.ptr(), &size));
if (!buffer) {
PyErr_Clear();
return false;
}
value = StringType(buffer, static_cast<size_t>(size));
return true;
}
auto utfNbytes
= reinterpret_steal<object>(PyUnicode_AsEncodedString(load_src.ptr(),
UTF_N == 8 ? "utf-8"
: UTF_N == 16 ? "utf-16"
: "utf-32",
nullptr));
if (!utfNbytes) {
PyErr_Clear();
return false;
}
const auto *buffer
= reinterpret_cast<const CharT *>(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr()));
size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT);
// Skip BOM for UTF-16/32
if (UTF_N > 8) {
buffer++;
length--;
}
value = StringType(buffer, length);
// If we're loading a string_view we need to keep the encoded Python object alive:
if (IsView) {
loader_life_support::add_patient(utfNbytes);
}
return true;
}
static handle
cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) {
const char *buffer = reinterpret_cast<const char *>(src.data());
auto nbytes = ssize_t(src.size() * sizeof(CharT));
handle s = decode_utfN(buffer, nbytes);
if (!s) {
throw error_already_set();
}
return s;
}
PYBIND11_TYPE_CASTER(StringType, const_name(PYBIND11_STRING_NAME));
private:
static handle decode_utfN(const char *buffer, ssize_t nbytes) {
#if !defined(PYPY_VERSION)
return UTF_N == 8 ? PyUnicode_DecodeUTF8(buffer, nbytes, nullptr)
: UTF_N == 16 ? PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr)
: PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr);
#else
// PyPy segfaults when on PyUnicode_DecodeUTF16 (and possibly on PyUnicode_DecodeUTF32 as
// well), so bypass the whole thing by just passing the encoding as a string value, which
// works properly:
return PyUnicode_Decode(buffer,
nbytes,
UTF_N == 8 ? "utf-8"
: UTF_N == 16 ? "utf-16"
: "utf-32",
nullptr);
#endif
}
    // When loading into a std::string or char*, accept a bytes/bytearray object as-is (i.e.
    // without any encoding/decoding attempt). For other C++ char sizes this is a no-op.
    // The regular load() path above, which decodes a unicode str, doesn't go through here.
template <typename C = CharT>
bool load_raw(enable_if_t<std::is_same<C, char>::value, handle> src) {
if (PYBIND11_BYTES_CHECK(src.ptr())) {
// We were passed raw bytes; accept it into a std::string or char*
// without any encoding attempt.
const char *bytes = PYBIND11_BYTES_AS_STRING(src.ptr());
if (!bytes) {
pybind11_fail("Unexpected PYBIND11_BYTES_AS_STRING() failure.");
}
value = StringType(bytes, (size_t) PYBIND11_BYTES_SIZE(src.ptr()));
return true;
}
if (PyByteArray_Check(src.ptr())) {
// We were passed a bytearray; accept it into a std::string or char*
// without any encoding attempt.
const char *bytearray = PyByteArray_AsString(src.ptr());
if (!bytearray) {
pybind11_fail("Unexpected PyByteArray_AsString() failure.");
}
value = StringType(bytearray, (size_t) PyByteArray_Size(src.ptr()));
return true;
}
return false;
}
template <typename C = CharT>
bool load_raw(enable_if_t<!std::is_same<C, char>::value, handle>) {
return false;
}
};
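// Illustrative sketch (not part of the original header): with this caster, a
// Python str is decoded into std::string as UTF-8, while bytes/bytearray
// objects are accepted verbatim via load_raw() above:
//
//     m.def("nbytes", [](const std::string &s) { return s.size(); });
//     // Python: nbytes("héllo") == 6 (UTF-8 bytes); nbytes(b"abc") == 3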
template <typename CharT, class Traits, class Allocator>
struct type_caster<std::basic_string<CharT, Traits, Allocator>,
enable_if_t<is_std_char_type<CharT>::value>>
: string_caster<std::basic_string<CharT, Traits, Allocator>> {};
#ifdef PYBIND11_HAS_STRING_VIEW
template <typename CharT, class Traits>
struct type_caster<std::basic_string_view<CharT, Traits>,
enable_if_t<is_std_char_type<CharT>::value>>
: string_caster<std::basic_string_view<CharT, Traits>, true> {};
#endif
// Type caster for C-style strings. We basically use a std::string type caster, but also add the
// ability to use None as a nullptr char* (which the string caster doesn't allow).
template <typename CharT>
struct type_caster<CharT, enable_if_t<is_std_char_type<CharT>::value>> {
using StringType = std::basic_string<CharT>;
using StringCaster = make_caster<StringType>;
StringCaster str_caster;
bool none = false;
CharT one_char = 0;
public:
bool load(handle src, bool convert) {
if (!src) {
return false;
}
if (src.is_none()) {
// Defer accepting None to other overloads (if we aren't in convert mode):
if (!convert) {
return false;
}
none = true;
return true;
}
return str_caster.load(src, convert);
}
static handle cast(const CharT *src, return_value_policy policy, handle parent) {
if (src == nullptr) {
return pybind11::none().release();
}
return StringCaster::cast(StringType(src), policy, parent);
}
static handle cast(CharT src, return_value_policy policy, handle parent) {
if (std::is_same<char, CharT>::value) {
handle s = PyUnicode_DecodeLatin1((const char *) &src, 1, nullptr);
if (!s) {
throw error_already_set();
}
return s;
}
return StringCaster::cast(StringType(1, src), policy, parent);
}
explicit operator CharT *() {
return none ? nullptr : const_cast<CharT *>(static_cast<StringType &>(str_caster).c_str());
}
explicit operator CharT &() {
if (none) {
throw value_error("Cannot convert None to a character");
}
auto &value = static_cast<StringType &>(str_caster);
size_t str_len = value.size();
if (str_len == 0) {
throw value_error("Cannot convert empty string to a character");
}
// If we're in UTF-8 mode, we have two possible failures: one for a unicode character that
// is too high, and one for multiple unicode characters (caught later), so we need to
// figure out how long the first encoded character is in bytes to distinguish between these
        // two errors. We also want to allow unicode characters U+0080 through U+00FF, as
// those can fit into a single char value.
if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) {
auto v0 = static_cast<unsigned char>(value[0]);
// low bits only: 0-127
// 0b110xxxxx - start of 2-byte sequence
// 0b1110xxxx - start of 3-byte sequence
// 0b11110xxx - start of 4-byte sequence
size_t char0_bytes = (v0 & 0x80) == 0 ? 1
: (v0 & 0xE0) == 0xC0 ? 2
: (v0 & 0xF0) == 0xE0 ? 3
: 4;
if (char0_bytes == str_len) {
// If we have a 128-255 value, we can decode it into a single char:
            if (char0_bytes == 2 && (v0 & 0xFC) == 0xC0) { // 0b110000xx 0b10xxxxxx
one_char = static_cast<CharT>(((v0 & 3) << 6)
+ (static_cast<unsigned char>(value[1]) & 0x3F));
return one_char;
}
// Otherwise we have a single character, but it's > U+00FF
throw value_error("Character code point not in range(0x100)");
}
}
// UTF-16 is much easier: we can only have a surrogate pair for values above U+FFFF, thus a
// surrogate pair with total length 2 instantly indicates a range error (but not a "your
// string was too long" error).
else if (StringCaster::UTF_N == 16 && str_len == 2) {
one_char = static_cast<CharT>(value[0]);
if (one_char >= 0xD800 && one_char < 0xE000) {
throw value_error("Character code point not in range(0x10000)");
}
}
if (str_len != 1) {
throw value_error("Expected a character, but multi-character string found");
}
one_char = value[0];
return one_char;
}
static constexpr auto name = const_name(PYBIND11_STRING_NAME);
template <typename _T>
using cast_op_type = pybind11::detail::cast_op_type<_T>;
};
// Base implementation for std::tuple and std::pair
template <template <typename...> class Tuple, typename... Ts>
class tuple_caster {
using type = Tuple<Ts...>;
static constexpr auto size = sizeof...(Ts);
using indices = make_index_sequence<size>;
public:
bool load(handle src, bool convert) {
if (!isinstance<sequence>(src)) {
return false;
}
const auto seq = reinterpret_borrow<sequence>(src);
if (seq.size() != size) {
return false;
}
return load_impl(seq, convert, indices{});
}
template <typename T>
static handle cast(T &&src, return_value_policy policy, handle parent) {
return cast_impl(std::forward<T>(src), policy, parent, indices{});
}
// copied from the PYBIND11_TYPE_CASTER macro
template <typename T>
static handle cast(T *src, return_value_policy policy, handle parent) {
if (!src) {
return none().release();
}
if (policy == return_value_policy::take_ownership) {
auto h = cast(std::move(*src), policy, parent);
delete src;
return h;
}
return cast(*src, policy, parent);
}
static constexpr auto name = const_name("tuple[")
+ ::pybind11::detail::concat(make_caster<Ts>::name...)
+ const_name("]");
template <typename T>
using cast_op_type = type;
explicit operator type() & { return implicit_cast(indices{}); }
explicit operator type() && { return std::move(*this).implicit_cast(indices{}); }
protected:
template <size_t... Is>
type implicit_cast(index_sequence<Is...>) & {
return type(cast_op<Ts>(std::get<Is>(subcasters))...);
}
template <size_t... Is>
type implicit_cast(index_sequence<Is...>) && {
return type(cast_op<Ts>(std::move(std::get<Is>(subcasters)))...);
}
static constexpr bool load_impl(const sequence &, bool, index_sequence<>) { return true; }
template <size_t... Is>
bool load_impl(const sequence &seq, bool convert, index_sequence<Is...>) {
#ifdef __cpp_fold_expressions
if ((... || !std::get<Is>(subcasters).load(seq[Is], convert))) {
return false;
}
#else
for (bool r : {std::get<Is>(subcasters).load(seq[Is], convert)...}) {
if (!r) {
return false;
}
}
#endif
return true;
}
/* Implementation: Convert a C++ tuple into a Python tuple */
template <typename T, size_t... Is>
static handle
cast_impl(T &&src, return_value_policy policy, handle parent, index_sequence<Is...>) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(src, policy, parent);
PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(policy, parent);
std::array<object, size> entries{{reinterpret_steal<object>(
make_caster<Ts>::cast(std::get<Is>(std::forward<T>(src)), policy, parent))...}};
for (const auto &entry : entries) {
if (!entry) {
return handle();
}
}
tuple result(size);
int counter = 0;
for (auto &entry : entries) {
PyTuple_SET_ITEM(result.ptr(), counter++, entry.release().ptr());
}
return result.release();
}
Tuple<make_caster<Ts>...> subcasters;
};
template <typename T1, typename T2>
class type_caster<std::pair<T1, T2>> : public tuple_caster<std::pair, T1, T2> {};
template <typename... Ts>
class type_caster<std::tuple<Ts...>> : public tuple_caster<std::tuple, Ts...> {};
template <>
class type_caster<std::tuple<>> : public tuple_caster<std::tuple> {
public:
// PEP 484 specifies this syntax for an empty tuple
static constexpr auto name = const_name("tuple[()]");
};
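// Illustrative sketch (not part of the original header): with these casters,
// std::pair and std::tuple convert transparently to and from Python tuples
// (element types must themselves be castable). `m` is a hypothetical module:
//
//     m.def("divmod2", [](int a, int b) {
//         return std::make_tuple(a / b, a % b);
//     });
//     // Python: divmod2(7, 2) -> (3, 1)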
/// Helper class which abstracts away certain actions. Users can provide specializations for
/// custom holders, but it's only necessary if the type has a non-standard interface.
template <typename T>
struct holder_helper {
static auto get(const T &p) -> decltype(p.get()) { return p.get(); }
};
/// Type caster for holder types like std::shared_ptr, etc.
/// The SFINAE hook is provided to help work around the current lack of support
/// for smart-pointer interoperability. Please consider it an implementation
/// detail that may change in the future, as formal support for smart-pointer
/// interoperability is added into pybind11.
template <typename type, typename holder_type, typename SFINAE = void>
struct copyable_holder_caster : public type_caster_base<type> {
public:
using base = type_caster_base<type>;
static_assert(std::is_base_of<base, type_caster<type>>::value,
"Holder classes are only supported for custom types");
using base::base;
using base::cast;
using base::typeinfo;
using base::value;
bool load(handle src, bool convert) {
return base::template load_impl<copyable_holder_caster<type, holder_type>>(src, convert);
}
explicit operator type *() { return this->value; }
// static_cast works around compiler error with MSVC 17 and CUDA 10.2
// see issue #2180
explicit operator type &() { return *(static_cast<type *>(this->value)); }
explicit operator holder_type *() { return std::addressof(holder); }
explicit operator holder_type &() { return holder; }
static handle cast(const holder_type &src, return_value_policy, handle) {
const auto *ptr = holder_helper<holder_type>::get(src);
return type_caster_base<type>::cast_holder(ptr, &src);
}
protected:
friend class type_caster_generic;
void check_holder_compat() {
if (typeinfo->default_holder) {
throw cast_error("Unable to load a custom holder type from a default-holder instance");
}
}
void load_value(value_and_holder &&v_h) {
if (v_h.holder_constructed()) {
value = v_h.value_ptr();
holder = v_h.template holder<holder_type>();
return;
}
throw cast_error("Unable to cast from non-held to held instance (T& to Holder<T>) "
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
"(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for "
"type information)");
#else
"of type '"
+ type_id<holder_type>() + "''");
#endif
}
template <typename T = holder_type,
detail::enable_if_t<!std::is_constructible<T, const T &, type *>::value, int> = 0>
bool try_implicit_casts(handle, bool) {
return false;
}
template <typename T = holder_type,
detail::enable_if_t<std::is_constructible<T, const T &, type *>::value, int> = 0>
bool try_implicit_casts(handle src, bool convert) {
for (auto &cast : typeinfo->implicit_casts) {
copyable_holder_caster sub_caster(*cast.first);
if (sub_caster.load(src, convert)) {
value = cast.second(sub_caster.value);
holder = holder_type(sub_caster.holder, (type *) value);
return true;
}
}
return false;
}
static bool try_direct_conversions(handle) { return false; }
holder_type holder;
};
/// Specialize for the common std::shared_ptr, so users don't need to declare it themselves
template <typename T>
class type_caster<std::shared_ptr<T>> : public copyable_holder_caster<T, std::shared_ptr<T>> {};
/// Type caster for holder types like std::unique_ptr.
/// Please consider the SFINAE hook an implementation detail, as explained
/// in the comment for the copyable_holder_caster.
template <typename type, typename holder_type, typename SFINAE = void>
struct move_only_holder_caster {
static_assert(std::is_base_of<type_caster_base<type>, type_caster<type>>::value,
"Holder classes are only supported for custom types");
static handle cast(holder_type &&src, return_value_policy, handle) {
auto *ptr = holder_helper<holder_type>::get(src);
return type_caster_base<type>::cast_holder(ptr, std::addressof(src));
}
static constexpr auto name = type_caster_base<type>::name;
};
template <typename type, typename deleter>
class type_caster<std::unique_ptr<type, deleter>>
: public move_only_holder_caster<type, std::unique_ptr<type, deleter>> {};
template <typename type, typename holder_type>
using type_caster_holder = conditional_t<is_copy_constructible<holder_type>::value,
copyable_holder_caster<type, holder_type>,
move_only_holder_caster<type, holder_type>>;
template <typename T, bool Value = false>
struct always_construct_holder {
static constexpr bool value = Value;
};
/// Create a specialization for custom holder types (silently ignores std::shared_ptr)
#define PYBIND11_DECLARE_HOLDER_TYPE(type, holder_type, ...) \
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) \
namespace detail { \
template <typename type> \
struct always_construct_holder<holder_type> : always_construct_holder<void, ##__VA_ARGS__> { \
}; \
template <typename type> \
class type_caster<holder_type, enable_if_t<!is_shared_ptr<holder_type>::value>> \
: public type_caster_holder<type, holder_type> {}; \
} \
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
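// Illustrative sketch (not part of the original header): registering a custom
// smart pointer as a holder, adapted from the pybind11 documentation. `ref<T>`
// is a hypothetical intrusive reference-counting pointer:
//
//     PYBIND11_DECLARE_HOLDER_TYPE(T, ref<T>);
//     // Pass `true` as the extra argument if the holder must always be
//     // constructed, even when wrapping an existing raw `T *`:
//     // PYBIND11_DECLARE_HOLDER_TYPE(T, ref<T>, true);
//
//     py::class_<MyType, ref<MyType>>(m, "MyType");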
// PYBIND11_DECLARE_HOLDER_TYPE holder types:
template <typename base, typename holder>
struct is_holder_type
: std::is_base_of<detail::type_caster_holder<base, holder>, detail::type_caster<holder>> {};
// Specialization for always-supported unique_ptr holders:
template <typename base, typename deleter>
struct is_holder_type<base, std::unique_ptr<base, deleter>> : std::true_type {};
#ifdef PYBIND11_DISABLE_HANDLE_TYPE_NAME_DEFAULT_IMPLEMENTATION // See PR #4888
// This leads to compilation errors if a specialization is missing.
template <typename T>
struct handle_type_name;
#else
template <typename T>
struct handle_type_name {
static constexpr auto name = const_name<T>();
};
#endif
template <>
struct handle_type_name<object> {
static constexpr auto name = const_name("object");
};
template <>
struct handle_type_name<list> {
static constexpr auto name = const_name("list");
};
template <>
struct handle_type_name<dict> {
static constexpr auto name = const_name("dict");
};
template <>
struct handle_type_name<anyset> {
static constexpr auto name = const_name("Union[set, frozenset]");
};
template <>
struct handle_type_name<set> {
static constexpr auto name = const_name("set");
};
template <>
struct handle_type_name<frozenset> {
static constexpr auto name = const_name("frozenset");
};
template <>
struct handle_type_name<str> {
static constexpr auto name = const_name("str");
};
template <>
struct handle_type_name<tuple> {
static constexpr auto name = const_name("tuple");
};
template <>
struct handle_type_name<bool_> {
static constexpr auto name = const_name("bool");
};
template <>
struct handle_type_name<bytes> {
static constexpr auto name = const_name(PYBIND11_BYTES_NAME);
};
template <>
struct handle_type_name<buffer> {
static constexpr auto name = const_name("Buffer");
};
template <>
struct handle_type_name<int_> {
static constexpr auto name = const_name("int");
};
template <>
struct handle_type_name<iterable> {
static constexpr auto name = const_name("Iterable");
};
template <>
struct handle_type_name<iterator> {
static constexpr auto name = const_name("Iterator");
};
template <>
struct handle_type_name<float_> {
static constexpr auto name = const_name("float");
};
template <>
struct handle_type_name<function> {
static constexpr auto name = const_name("Callable");
};
template <>
struct handle_type_name<handle> {
static constexpr auto name = handle_type_name<object>::name;
};
template <>
struct handle_type_name<none> {
static constexpr auto name = const_name("None");
};
template <>
struct handle_type_name<sequence> {
static constexpr auto name = const_name("Sequence");
};
template <>
struct handle_type_name<bytearray> {
static constexpr auto name = const_name("bytearray");
};
template <>
struct handle_type_name<memoryview> {
static constexpr auto name = const_name("memoryview");
};
template <>
struct handle_type_name<slice> {
static constexpr auto name = const_name("slice");
};
template <>
struct handle_type_name<type> {
static constexpr auto name = const_name("type");
};
template <>
struct handle_type_name<capsule> {
static constexpr auto name = const_name("capsule");
};
template <>
struct handle_type_name<ellipsis> {
static constexpr auto name = const_name("ellipsis");
};
template <>
struct handle_type_name<weakref> {
static constexpr auto name = const_name("weakref");
};
template <>
struct handle_type_name<args> {
static constexpr auto name = const_name("*args");
};
template <>
struct handle_type_name<kwargs> {
static constexpr auto name = const_name("**kwargs");
};
template <>
struct handle_type_name<obj_attr_accessor> {
static constexpr auto name = const_name<obj_attr_accessor>();
};
template <>
struct handle_type_name<str_attr_accessor> {
static constexpr auto name = const_name<str_attr_accessor>();
};
template <>
struct handle_type_name<item_accessor> {
static constexpr auto name = const_name<item_accessor>();
};
template <>
struct handle_type_name<sequence_accessor> {
static constexpr auto name = const_name<sequence_accessor>();
};
template <>
struct handle_type_name<list_accessor> {
static constexpr auto name = const_name<list_accessor>();
};
template <>
struct handle_type_name<tuple_accessor> {
static constexpr auto name = const_name<tuple_accessor>();
};
template <typename type>
struct pyobject_caster {
template <typename T = type, enable_if_t<std::is_same<T, handle>::value, int> = 0>
pyobject_caster() : value() {}
// `type` may not be default constructible (e.g. frozenset, anyset). Initializing `value`
// to a nil handle is safe since it will only be accessed if `load` succeeds.
template <typename T = type, enable_if_t<std::is_base_of<object, T>::value, int> = 0>
pyobject_caster() : value(reinterpret_steal<type>(handle())) {}
template <typename T = type, enable_if_t<std::is_same<T, handle>::value, int> = 0>
bool load(handle src, bool /* convert */) {
value = src;
return static_cast<bool>(value);
}
template <typename T = type, enable_if_t<std::is_base_of<object, T>::value, int> = 0>
bool load(handle src, bool /* convert */) {
if (!isinstance<type>(src)) {
return false;
}
value = reinterpret_borrow<type>(src);
return true;
}
static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {
return src.inc_ref();
}
PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);
};
template <typename T>
class type_caster<T, enable_if_t<is_pyobject<T>::value>> : public pyobject_caster<T> {};
// Our conditions for enabling moving are quite restrictive:
// At compile time:
// - T needs to be a non-const, non-pointer, non-reference type
// - type_caster<T>::operator T&() must exist
// - the type must be move constructible (obviously)
// At run-time:
// - if the type is non-copy-constructible, the object must be the sole owner of the type (i.e. it
// must have ref_count() == 1)
// If any of the above are not satisfied, we fall back to copying.
template <typename T>
using move_is_plain_type
= satisfies_none_of<T, std::is_void, std::is_pointer, std::is_reference, std::is_const>;
template <typename T, typename SFINAE = void>
struct move_always : std::false_type {};
template <typename T>
struct move_always<
T,
enable_if_t<
all_of<move_is_plain_type<T>,
negation<is_copy_constructible<T>>,
is_move_constructible<T>,
std::is_same<decltype(std::declval<make_caster<T>>().operator T &()), T &>>::value>>
: std::true_type {};
template <typename T, typename SFINAE = void>
struct move_if_unreferenced : std::false_type {};
template <typename T>
struct move_if_unreferenced<
T,
enable_if_t<
all_of<move_is_plain_type<T>,
negation<move_always<T>>,
is_move_constructible<T>,
std::is_same<decltype(std::declval<make_caster<T>>().operator T &()), T &>>::value>>
: std::true_type {};
template <typename T>
using move_never = none_of<move_always<T>, move_if_unreferenced<T>>;
// Detect whether returning a `type` from a cast on type's type_caster is going to result in a
// reference or pointer to a local variable of the type_caster. Basically, only
// non-reference/pointer `type`s and reference/pointers from a type_caster_generic are safe;
// everything else returns a reference/pointer to a local variable.
template <typename type>
using cast_is_temporary_value_reference
= bool_constant<(std::is_reference<type>::value || std::is_pointer<type>::value)
&& !std::is_base_of<type_caster_generic, make_caster<type>>::value
&& !std::is_same<intrinsic_t<type>, void>::value>;
// When a value returned from a C++ function is being cast back to Python, we almost always want to
// force `policy = move`, regardless of the return value policy the function/method was declared
// with.
template <typename Return, typename SFINAE = void>
struct return_value_policy_override {
static return_value_policy policy(return_value_policy p) { return p; }
};
template <typename Return>
struct return_value_policy_override<
Return,
detail::enable_if_t<std::is_base_of<type_caster_generic, make_caster<Return>>::value, void>> {
static return_value_policy policy(return_value_policy p) {
return !std::is_lvalue_reference<Return>::value && !std::is_pointer<Return>::value
? return_value_policy::move
: p;
}
};
// Basic python -> C++ casting; throws if casting fails
template <typename T, typename SFINAE>
type_caster<T, SFINAE> &load_type(type_caster<T, SFINAE> &conv, const handle &handle) {
static_assert(!detail::is_pyobject<T>::value,
"Internal error: type_caster should only be used for C++ types");
if (!conv.load(handle, true)) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
throw cast_error(
"Unable to cast Python instance of type "
+ str(type::handle_of(handle)).cast<std::string>()
+ " to C++ type '?' (#define "
"PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
#else
throw cast_error("Unable to cast Python instance of type "
+ str(type::handle_of(handle)).cast<std::string>() + " to C++ type '"
+ type_id<T>() + "'");
#endif
}
return conv;
}
// Wrapper around the above that also constructs and returns a type_caster
template <typename T>
make_caster<T> load_type(const handle &handle) {
make_caster<T> conv;
load_type(conv, handle);
return conv;
}
PYBIND11_NAMESPACE_END(detail)
// pytype -> C++ type
template <typename T,
detail::enable_if_t<!detail::is_pyobject<T>::value
&& !detail::is_same_ignoring_cvref<T, PyObject *>::value,
int>
= 0>
T cast(const handle &handle) {
using namespace detail;
static_assert(!cast_is_temporary_value_reference<T>::value,
"Unable to cast type to reference: value is local to type caster");
return cast_op<T>(load_type<T>(handle));
}
// pytype -> pytype (calls converting constructor)
template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
T cast(const handle &handle) {
return T(reinterpret_borrow<object>(handle));
}
// Note that `cast<PyObject *>(obj)` increments the reference count of `obj`.
// This is necessary for the case that `obj` is a temporary, and could
// not possibly be different, given
// 1. the established convention that the passed `handle` is borrowed, and
// 2. we don't want to force all generic code using `cast<T>()` to special-case
// handling of `T` = `PyObject *` (to increment the reference count there).
// It is the responsibility of the caller to ensure that the reference count
// is decremented.
template <typename T,
typename Handle,
detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value
&& detail::is_same_ignoring_cvref<Handle, handle>::value,
int>
= 0>
T cast(Handle &&handle) {
return handle.inc_ref().ptr();
}
// To optimize away an inc_ref/dec_ref cycle:
template <typename T,
typename Object,
detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value
&& detail::is_same_ignoring_cvref<Object, object>::value,
int>
= 0>
T cast(Object &&obj) {
return obj.release().ptr();
}
// C++ type -> py::object
template <typename T, detail::enable_if_t<!detail::is_pyobject<T>::value, int> = 0>
object cast(T &&value,
return_value_policy policy = return_value_policy::automatic_reference,
handle parent = handle()) {
using no_ref_T = typename std::remove_reference<T>::type;
if (policy == return_value_policy::automatic) {
policy = std::is_pointer<no_ref_T>::value ? return_value_policy::take_ownership
: std::is_lvalue_reference<T>::value ? return_value_policy::copy
: return_value_policy::move;
} else if (policy == return_value_policy::automatic_reference) {
policy = std::is_pointer<no_ref_T>::value ? return_value_policy::reference
: std::is_lvalue_reference<T>::value ? return_value_policy::copy
: return_value_policy::move;
}
return reinterpret_steal<object>(
detail::make_caster<T>::cast(std::forward<T>(value), policy, parent));
}
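// Illustrative sketch (not part of the original header): both casting
// directions in action:
//
//     py::object pi = py::cast(3.14159);  // C++ double -> Python float
//     double d = pi.cast<double>();       // Python float -> C++ double
//     // Container conversions additionally require pybind11/stl.h:
//     py::object lst = py::cast(std::vector<int>{1, 2, 3});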
template <typename T>
T handle::cast() const {
return pybind11::cast<T>(*this);
}
template <>
inline void handle::cast() const {
return;
}
template <typename T>
detail::enable_if_t<!detail::move_never<T>::value, T> move(object &&obj) {
if (obj.ref_count() > 1) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
throw cast_error(
"Unable to cast Python " + str(type::handle_of(obj)).cast<std::string>()
+ " instance to C++ rvalue: instance has multiple references"
" (#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
#else
throw cast_error("Unable to move from Python "
+ str(type::handle_of(obj)).cast<std::string>() + " instance to C++ "
+ type_id<T>() + " instance: instance has multiple references");
#endif
}
// Move into a temporary and return that, because the reference may be a local value of `conv`
T ret = std::move(detail::load_type<T>(obj).operator T &());
return ret;
}
// Calling cast() on an rvalue calls pybind11::cast with the object rvalue, which does:
// - If we have to move (because T has no copy constructor), do it. This will fail if the moved
// object has multiple references, but trying to copy will fail to compile.
// - If both movable and copyable, check ref count: if 1, move; otherwise copy
// - Otherwise (not movable), copy.
template <typename T>
detail::enable_if_t<!detail::is_pyobject<T>::value && detail::move_always<T>::value, T>
cast(object &&object) {
return move<T>(std::move(object));
}
template <typename T>
detail::enable_if_t<!detail::is_pyobject<T>::value && detail::move_if_unreferenced<T>::value, T>
cast(object &&object) {
if (object.ref_count() > 1) {
return cast<T>(object);
}
return move<T>(std::move(object));
}
template <typename T>
detail::enable_if_t<!detail::is_pyobject<T>::value && detail::move_never<T>::value, T>
cast(object &&object) {
return cast<T>(object);
}
// pytype rvalue -> pytype (calls converting constructor)
template <typename T>
detail::enable_if_t<detail::is_pyobject<T>::value, T> cast(object &&object) {
return T(std::move(object));
}
template <typename T>
T object::cast() const & {
return pybind11::cast<T>(*this);
}
template <typename T>
T object::cast() && {
return pybind11::cast<T>(std::move(*this));
}
template <>
inline void object::cast() const & {
return;
}
template <>
inline void object::cast() && {
return;
}
PYBIND11_NAMESPACE_BEGIN(detail)
// Declared in pytypes.h:
template <typename T, enable_if_t<!is_pyobject<T>::value, int>>
object object_or_cast(T &&o) {
return pybind11::cast(std::forward<T>(o));
}
// Placeholder type for the unneeded (and dead code) static variable in the
// PYBIND11_OVERRIDE macros
struct override_unused {};
template <typename ret_type>
using override_caster_t = conditional_t<cast_is_temporary_value_reference<ret_type>::value,
make_caster<ret_type>,
override_unused>;
// Trampoline use: for reference/pointer types to value-converted values, we do a value cast, then
// store the result in the given variable. For other types, this is a no-op.
template <typename T>
enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&o,
make_caster<T> &caster) {
return cast_op<T>(load_type(caster, o));
}
template <typename T>
enable_if_t<!cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&,
override_unused &) {
pybind11_fail("Internal error: cast_ref fallback invoked");
}
// Trampoline use: Having a pybind11::cast with an invalid reference type is going to
// static_assert, even if it's in dead code, so we provide a "trampoline" to pybind11::cast
// that only does anything in cases where pybind11::cast is valid.
template <typename T>
enable_if_t<cast_is_temporary_value_reference<T>::value
&& !detail::is_same_ignoring_cvref<T, PyObject *>::value,
T>
cast_safe(object &&) {
pybind11_fail("Internal error: cast_safe fallback invoked");
}
template <typename T>
enable_if_t<std::is_void<T>::value, void> cast_safe(object &&) {}
template <typename T>
enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value, PyObject *>
cast_safe(object &&o) {
return o.release().ptr();
}
template <typename T>
enable_if_t<detail::none_of<cast_is_temporary_value_reference<T>,
detail::is_same_ignoring_cvref<T, PyObject *>,
std::is_void<T>>::value,
T>
cast_safe(object &&o) {
return pybind11::cast<T>(std::move(o));
}
PYBIND11_NAMESPACE_END(detail)
// The overloads could coexist, i.e. the #if is not strictly speaking needed,
// but it is an easy minor optimization.
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
inline cast_error cast_error_unable_to_convert_call_arg(const std::string &name) {
return cast_error("Unable to convert call argument '" + name
+ "' to Python object (#define "
"PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
}
#else
inline cast_error cast_error_unable_to_convert_call_arg(const std::string &name,
const std::string &type) {
return cast_error("Unable to convert call argument '" + name + "' of type '" + type
+ "' to Python object");
}
#endif
template <return_value_policy policy = return_value_policy::automatic_reference>
tuple make_tuple() {
return tuple(0);
}
template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
tuple make_tuple(Args &&...args_) {
constexpr size_t size = sizeof...(Args);
std::array<object, size> args{{reinterpret_steal<object>(
detail::make_caster<Args>::cast(std::forward<Args>(args_), policy, nullptr))...}};
for (size_t i = 0; i < args.size(); i++) {
if (!args[i]) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
throw cast_error_unable_to_convert_call_arg(std::to_string(i));
#else
std::array<std::string, size> argtypes{{type_id<Args>()...}};
throw cast_error_unable_to_convert_call_arg(std::to_string(i), argtypes[i]);
#endif
}
}
tuple result(size);
int counter = 0;
for (auto &arg_value : args) {
PyTuple_SET_ITEM(result.ptr(), counter++, arg_value.release().ptr());
}
return result;
}
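// Illustrative sketch (not part of the original header):
//
//     py::tuple t = py::make_tuple(42, "hello", 3.5);
//     // t is the Python tuple (42, 'hello', 3.5); each element is converted
//     // with the default return_value_policy::automatic_reference.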
/// \ingroup annotations
/// Annotation for arguments
struct arg {
/// Constructs an argument with the name of the argument; if null or omitted, this is a
/// positional argument.
constexpr explicit arg(const char *name = nullptr)
: name(name), flag_noconvert(false), flag_none(true) {}
/// Assign a value to this argument
template <typename T>
arg_v operator=(T &&value) const;
/// Indicate that the type should not be converted in the type caster
arg &noconvert(bool flag = true) {
flag_noconvert = flag;
return *this;
}
/// Indicates that the argument should/shouldn't allow None (e.g. for nullable pointer args)
arg &none(bool flag = true) {
flag_none = flag;
return *this;
}
const char *name; ///< If non-null, this is a named kwargs argument
bool flag_noconvert : 1; ///< If set, do not allow conversion (requires a supporting type
///< caster!)
bool flag_none : 1; ///< If set (the default), allow None to be passed to this argument
};
/// \ingroup annotations
/// Annotation for arguments with values
struct arg_v : arg {
private:
template <typename T>
arg_v(arg &&base, T &&x, const char *descr = nullptr)
: arg(base), value(reinterpret_steal<object>(detail::make_caster<T>::cast(
std::forward<T>(x), return_value_policy::automatic, {}))),
descr(descr)
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
,
type(type_id<T>())
#endif
{
// Workaround! See:
// https://github.com/pybind/pybind11/issues/2336
// https://github.com/pybind/pybind11/pull/2685#issuecomment-731286700
if (PyErr_Occurred()) {
PyErr_Clear();
}
}
public:
/// Direct construction with name, default, and description
template <typename T>
arg_v(const char *name, T &&x, const char *descr = nullptr)
: arg_v(arg(name), std::forward<T>(x), descr) {}
/// Called internally when invoking `py::arg("a") = value`
template <typename T>
arg_v(const arg &base, T &&x, const char *descr = nullptr)
: arg_v(arg(base), std::forward<T>(x), descr) {}
/// Same as `arg::noconvert()`, but returns *this as arg_v&, not arg&
arg_v &noconvert(bool flag = true) {
arg::noconvert(flag);
return *this;
}
/// Same as `arg::nonone()`, but returns *this as arg_v&, not arg&
arg_v &none(bool flag = true) {
arg::none(flag);
return *this;
}
/// The default value
object value;
/// The (optional) description of the default value
const char *descr;
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
/// The C++ type name of the default value (only available when compiled in debug mode)
std::string type;
#endif
};
/// \ingroup annotations
/// Annotation indicating that all following arguments are keyword-only; this is the equivalent
/// of an unnamed '*' argument
struct kw_only {};
/// \ingroup annotations
/// Annotation indicating that all previous arguments are positional-only; this is the
/// equivalent of an unnamed '/' argument (in Python 3.8)
struct pos_only {};
template <typename T>
arg_v arg::operator=(T &&value) const {
return {*this, std::forward<T>(value)};
}
/// Alias for backward compatibility -- to be removed in version 2.0
template <typename /*unused*/>
using arg_t = arg_v;
inline namespace literals {
/** \rst
String literal version of `arg`
\endrst */
constexpr arg
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 5
operator"" _a // gcc 4.8.5 insists on having a space (hard error).
#else
operator""_a // clang 17 generates a deprecation warning if there is a space.
#endif
(const char *name, size_t) {
return arg(name);
}
} // namespace literals
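// Illustrative sketch (not part of the original header): the annotations
// above as they appear in binding code, using both spellings:
//
//     using namespace pybind11::literals;
//     m.def("add", [](int x, int y) { return x + y; },
//           py::arg("x"), "y"_a = 1);   // default value assigned via operator=
//     m.def("floats_only", [](double f) { return 0.5 * f; },
//           py::arg("f").noconvert());  // reject implicit int -> float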
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename T>
using is_kw_only = std::is_same<intrinsic_t<T>, kw_only>;
template <typename T>
using is_pos_only = std::is_same<intrinsic_t<T>, pos_only>;
// forward declaration (definition in attr.h)
struct function_record;
/// Internal data associated with a single function call
struct function_call {
function_call(const function_record &f, handle p); // Implementation in attr.h
/// The function data:
const function_record &func;
/// Arguments passed to the function:
std::vector<handle> args;
/// The `convert` value the arguments should be loaded with
std::vector<bool> args_convert;
/// Extra references for the optional `py::args` and/or `py::kwargs` arguments (which, if
/// present, are also in `args` but without a reference).
object args_ref, kwargs_ref;
/// The parent, if any
handle parent;
/// If this is a call to an initializer, this argument contains `self`
handle init_self;
};
/// Helper class which loads arguments for C++ functions called from Python
template <typename... Args>
class argument_loader {
using indices = make_index_sequence<sizeof...(Args)>;
template <typename Arg>
using argument_is_args = std::is_same<intrinsic_t<Arg>, args>;
template <typename Arg>
using argument_is_kwargs = std::is_same<intrinsic_t<Arg>, kwargs>;
// Get kwargs argument position, or -1 if not present:
static constexpr auto kwargs_pos = constexpr_last<argument_is_kwargs, Args...>();
static_assert(kwargs_pos == -1 || kwargs_pos == (int) sizeof...(Args) - 1,
"py::kwargs is only permitted as the last argument of a function");
public:
static constexpr bool has_kwargs = kwargs_pos != -1;
// py::args argument position; -1 if not present.
static constexpr int args_pos = constexpr_last<argument_is_args, Args...>();
static_assert(args_pos == -1 || args_pos == constexpr_first<argument_is_args, Args...>(),
"py::args cannot be specified more than once");
static constexpr auto arg_names
= ::pybind11::detail::concat(type_descr(make_caster<Args>::name)...);
bool load_args(function_call &call) { return load_impl_sequence(call, indices{}); }
template <typename Return, typename Guard, typename Func>
// NOLINTNEXTLINE(readability-const-return-type)
enable_if_t<!std::is_void<Return>::value, Return> call(Func &&f) && {
return std::move(*this).template call_impl<remove_cv_t<Return>>(
std::forward<Func>(f), indices{}, Guard{});
}
template <typename Return, typename Guard, typename Func>
enable_if_t<std::is_void<Return>::value, void_type> call(Func &&f) && {
std::move(*this).template call_impl<remove_cv_t<Return>>(
std::forward<Func>(f), indices{}, Guard{});
return void_type();
}
private:
static bool load_impl_sequence(function_call &, index_sequence<>) { return true; }
template <size_t... Is>
bool load_impl_sequence(function_call &call, index_sequence<Is...>) {
#ifdef __cpp_fold_expressions
if ((... || !std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is]))) {
return false;
}
#else
for (bool r : {std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is])...}) {
if (!r) {
return false;
}
}
#endif
return true;
}
template <typename Return, typename Func, size_t... Is, typename Guard>
Return call_impl(Func &&f, index_sequence<Is...>, Guard &&) && {
return std::forward<Func>(f)(cast_op<Args>(std::move(std::get<Is>(argcasters)))...);
}
std::tuple<make_caster<Args>...> argcasters;
};
/// Helper class which collects only positional arguments for a Python function call.
/// A fancier version below can collect any argument, but this one is optimal for simple calls.
template <return_value_policy policy>
class simple_collector {
public:
template <typename... Ts>
explicit simple_collector(Ts &&...values)
: m_args(pybind11::make_tuple<policy>(std::forward<Ts>(values)...)) {}
const tuple &args() const & { return m_args; }
dict kwargs() const { return {}; }
tuple args() && { return std::move(m_args); }
/// Call a Python function and pass the collected arguments
object call(PyObject *ptr) const {
PyObject *result = PyObject_CallObject(ptr, m_args.ptr());
if (!result) {
throw error_already_set();
}
return reinterpret_steal<object>(result);
}
private:
tuple m_args;
};
/// Helper class which collects positional, keyword, * and ** arguments for a Python function call
template <return_value_policy policy>
class unpacking_collector {
public:
template <typename... Ts>
explicit unpacking_collector(Ts &&...values) {
// Tuples aren't (easily) resizable so a list is needed for collection,
// but the actual function call strictly requires a tuple.
auto args_list = list();
using expander = int[];
(void) expander{0, (process(args_list, std::forward<Ts>(values)), 0)...};
m_args = std::move(args_list);
}
const tuple &args() const & { return m_args; }
const dict &kwargs() const & { return m_kwargs; }
tuple args() && { return std::move(m_args); }
dict kwargs() && { return std::move(m_kwargs); }
/// Call a Python function and pass the collected arguments
object call(PyObject *ptr) const {
PyObject *result = PyObject_Call(ptr, m_args.ptr(), m_kwargs.ptr());
if (!result) {
throw error_already_set();
}
return reinterpret_steal<object>(result);
}
private:
template <typename T>
void process(list &args_list, T &&x) {
auto o = reinterpret_steal<object>(
detail::make_caster<T>::cast(std::forward<T>(x), policy, {}));
if (!o) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
throw cast_error_unable_to_convert_call_arg(std::to_string(args_list.size()));
#else
throw cast_error_unable_to_convert_call_arg(std::to_string(args_list.size()),
type_id<T>());
#endif
}
args_list.append(std::move(o));
}
void process(list &args_list, detail::args_proxy ap) {
for (auto a : ap) {
args_list.append(a);
}
}
void process(list & /*args_list*/, arg_v a) {
if (!a.name) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
nameless_argument_error();
#else
nameless_argument_error(a.type);
#endif
}
if (m_kwargs.contains(a.name)) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
multiple_values_error();
#else
multiple_values_error(a.name);
#endif
}
if (!a.value) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
throw cast_error_unable_to_convert_call_arg(a.name);
#else
throw cast_error_unable_to_convert_call_arg(a.name, a.type);
#endif
}
m_kwargs[a.name] = std::move(a.value);
}
void process(list & /*args_list*/, detail::kwargs_proxy kp) {
if (!kp) {
return;
}
for (auto k : reinterpret_borrow<dict>(kp)) {
if (m_kwargs.contains(k.first)) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
multiple_values_error();
#else
multiple_values_error(str(k.first));
#endif
}
m_kwargs[k.first] = k.second;
}
}
[[noreturn]] static void nameless_argument_error() {
throw type_error(
"Got kwargs without a name; only named arguments "
"may be passed via py::arg() to a python function call. "
"(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
}
[[noreturn]] static void nameless_argument_error(const std::string &type) {
throw type_error("Got kwargs without a name of type '" + type
+ "'; only named "
"arguments may be passed via py::arg() to a python function call. ");
}
[[noreturn]] static void multiple_values_error() {
throw type_error(
"Got multiple values for keyword argument "
"(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
}
[[noreturn]] static void multiple_values_error(const std::string &name) {
throw type_error("Got multiple values for keyword argument '" + name + "'");
}
private:
tuple m_args;
dict m_kwargs;
};
// [workaround(intel)] Separate function required here
// We need to put this into a separate function because the Intel compiler
// fails to compile enable_if_t<!all_of<is_positional<Args>...>::value>
// (tested with ICC 2021.1 Beta 20200827).
template <typename... Args>
constexpr bool args_are_all_positional() {
return all_of<is_positional<Args>...>::value;
}
/// Collect only positional arguments for a Python function call
template <return_value_policy policy,
typename... Args,
typename = enable_if_t<args_are_all_positional<Args...>()>>
simple_collector<policy> collect_arguments(Args &&...args) {
return simple_collector<policy>(std::forward<Args>(args)...);
}
/// Collect all arguments, including keywords and unpacking (only instantiated when needed)
template <return_value_policy policy,
typename... Args,
typename = enable_if_t<!args_are_all_positional<Args...>()>>
unpacking_collector<policy> collect_arguments(Args &&...args) {
// Following argument order rules for generalized unpacking according to PEP 448
static_assert(constexpr_last<is_positional, Args...>()
< constexpr_first<is_keyword_or_ds, Args...>()
&& constexpr_last<is_s_unpacking, Args...>()
< constexpr_first<is_ds_unpacking, Args...>(),
"Invalid function call: positional args must precede keywords and ** unpacking; "
"* unpacking must precede ** unpacking");
return unpacking_collector<policy>(std::forward<Args>(args)...);
}
template <typename Derived>
template <return_value_policy policy, typename... Args>
object object_api<Derived>::operator()(Args &&...args) const {
#ifndef NDEBUG
if (!PyGILState_Check()) {
pybind11_fail("pybind11::object_api<>::operator() PyGILState_Check() failure.");
}
#endif
return detail::collect_arguments<policy>(std::forward<Args>(args)...).call(derived().ptr());
}
template <typename Derived>
template <return_value_policy policy, typename... Args>
object object_api<Derived>::call(Args &&...args) const {
return operator()<policy>(std::forward<Args>(args)...);
}
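/* Usage sketch (illustrative only; the GIL must be held, and `f`, `extra`,
   `opts` are an assumed py::object callable, py::tuple, and py::dict):

       py::object result
           = f(1, 2, py::arg("mode") = "fast", *extra, **opts);

   Positional, keyword, star- and double-star-unpacked arguments may be
   mixed; the static_assert in collect_arguments() enforces PEP 448
   ordering at compile time.
*/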
PYBIND11_NAMESPACE_END(detail)
template <typename T>
handle type::handle_of() {
static_assert(std::is_base_of<detail::type_caster_generic, detail::make_caster<T>>::value,
"py::type::of<T> only supports the case where T is a registered C++ types.");
return detail::get_type_handle(typeid(T), true);
}
#define PYBIND11_MAKE_OPAQUE(...) \
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) \
namespace detail { \
template <> \
class type_caster<__VA_ARGS__> : public type_caster_base<__VA_ARGS__> {}; \
} \
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
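/* Usage sketch (illustrative only): making std::vector<int> opaque keeps it
   passed by reference instead of being converted to/from a Python list; the
   macro must appear at global scope before any binding code that uses the
   type, and is typically paired with py::bind_vector from
   pybind11/stl_bind.h:

       PYBIND11_MAKE_OPAQUE(std::vector<int>);
*/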
/// Lets you pass a type containing a `,` through a macro parameter without needing a separate
/// typedef, e.g.:
/// `PYBIND11_OVERRIDE(PYBIND11_TYPE(ReturnType<A, B>), PYBIND11_TYPE(Parent<C, D>), f, arg)`
#define PYBIND11_TYPE(...) __VA_ARGS__
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
=======================================================================================================================
SOURCE CODE FILE: chrono.h
LINES: 1
SIZE: 8.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\chrono.h
ENCODING: utf-8
```h
/*
pybind11/chrono.h: Transparent conversion between std::chrono and python's datetime
Copyright (c) 2016 Trent Houliston <[email protected]> and
Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include <chrono>
#include <cmath>
#include <ctime>
#include <datetime.h>
#include <mutex>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename type>
class duration_caster {
public:
using rep = typename type::rep;
using period = typename type::period;
// signed 25 bits required by the standard.
using days = std::chrono::duration<int_least32_t, std::ratio<86400>>;
bool load(handle src, bool) {
using namespace std::chrono;
// Lazy initialise the PyDateTime import
if (!PyDateTimeAPI) {
PyDateTime_IMPORT;
}
if (!src) {
return false;
}
// If invoked with a datetime.timedelta object
if (PyDelta_Check(src.ptr())) {
value = type(duration_cast<duration<rep, period>>(
days(PyDateTime_DELTA_GET_DAYS(src.ptr()))
+ seconds(PyDateTime_DELTA_GET_SECONDS(src.ptr()))
+ microseconds(PyDateTime_DELTA_GET_MICROSECONDS(src.ptr()))));
return true;
}
// If invoked with a float we assume it is seconds and convert
if (PyFloat_Check(src.ptr())) {
value = type(duration_cast<duration<rep, period>>(
duration<double>(PyFloat_AsDouble(src.ptr()))));
return true;
}
return false;
}
// If this is a duration just return it back
static const std::chrono::duration<rep, period> &
get_duration(const std::chrono::duration<rep, period> &src) {
return src;
}
// If this is a time_point get the time_since_epoch
template <typename Clock>
static std::chrono::duration<rep, period>
get_duration(const std::chrono::time_point<Clock, std::chrono::duration<rep, period>> &src) {
return src.time_since_epoch();
}
static handle cast(const type &src, return_value_policy /* policy */, handle /* parent */) {
using namespace std::chrono;
// Use overloaded function to get our duration from our source
// Works out if it is a duration or time_point and get the duration
auto d = get_duration(src);
// Lazy initialise the PyDateTime import
if (!PyDateTimeAPI) {
PyDateTime_IMPORT;
}
// Declare these special duration types so the conversions happen with the correct
// primitive types (int)
using dd_t = duration<int, std::ratio<86400>>;
using ss_t = duration<int, std::ratio<1>>;
using us_t = duration<int, std::micro>;
auto dd = duration_cast<dd_t>(d);
auto subd = d - dd;
auto ss = duration_cast<ss_t>(subd);
auto us = duration_cast<us_t>(subd - ss);
return PyDelta_FromDSU(dd.count(), ss.count(), us.count());
}
PYBIND11_TYPE_CASTER(type, const_name("datetime.timedelta"));
};
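/* Usage sketch (illustrative only; assumes a py::module_ `m`): a bound
   function taking a duration accepts either a datetime.timedelta or a plain
   float interpreted as seconds:

       m.def("wait", [](std::chrono::milliseconds d) { return d.count(); });
       // Python: wait(datetime.timedelta(milliseconds=250)) -> 250
       //         wait(0.25)                                 -> 250
*/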
inline std::tm *localtime_thread_safe(const std::time_t *time, std::tm *buf) {
#if (defined(__STDC_LIB_EXT1__) && defined(__STDC_WANT_LIB_EXT1__)) || defined(_MSC_VER)
if (localtime_s(buf, time))
return nullptr;
return buf;
#else
static std::mutex mtx;
std::lock_guard<std::mutex> lock(mtx);
std::tm *tm_ptr = std::localtime(time);
if (tm_ptr != nullptr) {
*buf = *tm_ptr;
}
return tm_ptr;
#endif
}
// This is for casting times on the system clock into datetime.datetime instances
template <typename Duration>
class type_caster<std::chrono::time_point<std::chrono::system_clock, Duration>> {
public:
using type = std::chrono::time_point<std::chrono::system_clock, Duration>;
bool load(handle src, bool) {
using namespace std::chrono;
// Lazy initialise the PyDateTime import
if (!PyDateTimeAPI) {
PyDateTime_IMPORT;
}
if (!src) {
return false;
}
std::tm cal;
microseconds msecs;
if (PyDateTime_Check(src.ptr())) {
cal.tm_sec = PyDateTime_DATE_GET_SECOND(src.ptr());
cal.tm_min = PyDateTime_DATE_GET_MINUTE(src.ptr());
cal.tm_hour = PyDateTime_DATE_GET_HOUR(src.ptr());
cal.tm_mday = PyDateTime_GET_DAY(src.ptr());
cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1;
cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900;
cal.tm_isdst = -1;
msecs = microseconds(PyDateTime_DATE_GET_MICROSECOND(src.ptr()));
} else if (PyDate_Check(src.ptr())) {
cal.tm_sec = 0;
cal.tm_min = 0;
cal.tm_hour = 0;
cal.tm_mday = PyDateTime_GET_DAY(src.ptr());
cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1;
cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900;
cal.tm_isdst = -1;
msecs = microseconds(0);
} else if (PyTime_Check(src.ptr())) {
cal.tm_sec = PyDateTime_TIME_GET_SECOND(src.ptr());
cal.tm_min = PyDateTime_TIME_GET_MINUTE(src.ptr());
cal.tm_hour = PyDateTime_TIME_GET_HOUR(src.ptr());
cal.tm_mday = 1; // This date (day, month, year) = (1, 0, 70)
cal.tm_mon = 0; // represents 1-Jan-1970, the earliest
cal.tm_year = 70; // available date for Python's datetime
cal.tm_isdst = -1;
msecs = microseconds(PyDateTime_TIME_GET_MICROSECOND(src.ptr()));
} else {
return false;
}
value = time_point_cast<Duration>(system_clock::from_time_t(std::mktime(&cal)) + msecs);
return true;
}
static handle cast(const std::chrono::time_point<std::chrono::system_clock, Duration> &src,
return_value_policy /* policy */,
handle /* parent */) {
using namespace std::chrono;
// Lazy initialise the PyDateTime import
if (!PyDateTimeAPI) {
PyDateTime_IMPORT;
}
// Get out microseconds, and make sure they are positive, to avoid a bug in eastern
// hemisphere time zones (cf. https://github.com/pybind/pybind11/issues/2417)
using us_t = duration<int, std::micro>;
auto us = duration_cast<us_t>(src.time_since_epoch() % seconds(1));
if (us.count() < 0) {
us += seconds(1);
}
// Subtract microseconds BEFORE `system_clock::to_time_t`, because:
// > If std::time_t has lower precision, it is implementation-defined whether the value is
// rounded or truncated. (https://en.cppreference.com/w/cpp/chrono/system_clock/to_time_t)
std::time_t tt
= system_clock::to_time_t(time_point_cast<system_clock::duration>(src - us));
std::tm localtime;
std::tm *localtime_ptr = localtime_thread_safe(&tt, &localtime);
if (!localtime_ptr) {
throw cast_error("Unable to represent system_clock in local time");
}
return PyDateTime_FromDateAndTime(localtime.tm_year + 1900,
localtime.tm_mon + 1,
localtime.tm_mday,
localtime.tm_hour,
localtime.tm_min,
localtime.tm_sec,
us.count());
}
PYBIND11_TYPE_CASTER(type, const_name("datetime.datetime"));
};
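/* Usage sketch (illustrative only; assumes a py::module_ `m`): system_clock
   time points round-trip as naive datetime.datetime values in local time:

       m.def("now", [] { return std::chrono::system_clock::now(); });
*/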
// Other clocks that are not the system clock are not measured as datetime.datetime objects
// since they are not measured on calendar time. So instead we just make them timedeltas
// Or if they have passed us a time as a float we convert that
template <typename Clock, typename Duration>
class type_caster<std::chrono::time_point<Clock, Duration>>
: public duration_caster<std::chrono::time_point<Clock, Duration>> {};
template <typename Rep, typename Period>
class type_caster<std::chrono::duration<Rep, Period>>
: public duration_caster<std::chrono::duration<Rep, Period>> {};
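/* Usage sketch (illustrative only; assumes a py::module_ `m`): non-system
   clocks surface as datetime.timedelta instead:

       m.def("uptime", [] { return std::chrono::steady_clock::now(); });
       // Python: uptime() -> timedelta since the clock's (arbitrary) epoch
*/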
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
=======================================================================================================================
SOURCE CODE FILE: common.h
LINES: 1
SIZE: 0.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\common.h
ENCODING: utf-8
```h
#include "detail/common.h"
#warning "Including 'common.h' is deprecated. It will be removed in v3.0. Use 'pybind11.h'."
```
|
========================================================================================================================
SOURCE CODE FILE: complex.h
LINES: 1
SIZE: 2.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\complex.h
ENCODING: utf-8
```h
/*
pybind11/complex.h: Complex number support
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include <complex>
/// glibc defines I as a macro which breaks things, e.g., boost template names
#ifdef I
# undef I
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
template <typename T>
struct format_descriptor<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
static constexpr const char c = format_descriptor<T>::c;
static constexpr const char value[3] = {'Z', c, '\0'};
static std::string format() { return std::string(value); }
};
#ifndef PYBIND11_CPP17
template <typename T>
constexpr const char
format_descriptor<std::complex<T>,
detail::enable_if_t<std::is_floating_point<T>::value>>::value[3];
#endif
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename T>
struct is_fmt_numeric<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
static constexpr bool value = true;
static constexpr int index = is_fmt_numeric<T>::index + 3;
};
template <typename T>
class type_caster<std::complex<T>> {
public:
bool load(handle src, bool convert) {
if (!src) {
return false;
}
if (!convert && !PyComplex_Check(src.ptr())) {
return false;
}
Py_complex result = PyComplex_AsCComplex(src.ptr());
if (result.real == -1.0 && PyErr_Occurred()) {
PyErr_Clear();
return false;
}
value = std::complex<T>((T) result.real, (T) result.imag);
return true;
}
static handle
cast(const std::complex<T> &src, return_value_policy /* policy */, handle /* parent */) {
return PyComplex_FromDoubles((double) src.real(), (double) src.imag());
}
PYBIND11_TYPE_CASTER(std::complex<T>, const_name("complex"));
};
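/* Usage sketch (illustrative only; assumes a py::module_ `m`):

       m.def("conj", [](std::complex<double> z) { return std::conj(z); });
       // Python: conj(1 + 2j) -> (1-2j); with convert enabled, plain ints
       // and floats load as well (imaginary part 0)
*/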
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
=============================================================================================================================
SOURCE CODE FILE: class.h
LINES: 1
SIZE: 29.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\class.h
ENCODING: utf-8
```h
/*
pybind11/detail/class.h: Python C API implementation details for py::class_
Copyright (c) 2017 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <pybind11/attr.h>
#include <pybind11/options.h>
#include "exception_translation.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
#if !defined(PYPY_VERSION)
# define PYBIND11_BUILTIN_QUALNAME
# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj)
#else
// In PyPy, we still set __qualname__ so that we can produce reliable function type
// signatures; in CPython this macro expands to nothing:
# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) \
setattr((PyObject *) obj, "__qualname__", nameobj)
#endif
inline std::string get_fully_qualified_tp_name(PyTypeObject *type) {
#if !defined(PYPY_VERSION)
return type->tp_name;
#else
auto module_name = handle((PyObject *) type).attr("__module__").cast<std::string>();
if (module_name == PYBIND11_BUILTINS_MODULE)
return type->tp_name;
else
return std::move(module_name) + "." + type->tp_name;
#endif
}
inline PyTypeObject *type_incref(PyTypeObject *type) {
Py_INCREF(type);
return type;
}
#if !defined(PYPY_VERSION)
/// `pybind11_static_property.__get__()`: Always pass the class instead of the instance.
extern "C" inline PyObject *pybind11_static_get(PyObject *self, PyObject * /*ob*/, PyObject *cls) {
return PyProperty_Type.tp_descr_get(self, cls, cls);
}
/// `pybind11_static_property.__set__()`: Just like the above `__get__()`.
extern "C" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObject *value) {
PyObject *cls = PyType_Check(obj) ? obj : (PyObject *) Py_TYPE(obj);
return PyProperty_Type.tp_descr_set(self, cls, value);
}
// Forward declaration to use in `make_static_property_type()`
inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type);
/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()`
methods are modified to always use the object type instead of a concrete instance.
Return value: New reference. */
inline PyTypeObject *make_static_property_type() {
constexpr auto *name = "pybind11_static_property";
auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));
/* Danger zone: from now (and until PyType_Ready), make sure to
issue no Python C API calls which could potentially invoke the
garbage collector (the GC will call type_traverse(), which will in
turn find the newly constructed type in an invalid state) */
auto *heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);
if (!heap_type) {
pybind11_fail("make_static_property_type(): error allocating type!");
}
heap_type->ht_name = name_obj.inc_ref().ptr();
# ifdef PYBIND11_BUILTIN_QUALNAME
heap_type->ht_qualname = name_obj.inc_ref().ptr();
# endif
auto *type = &heap_type->ht_type;
type->tp_name = name;
type->tp_base = type_incref(&PyProperty_Type);
type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
type->tp_descr_get = pybind11_static_get;
type->tp_descr_set = pybind11_static_set;
# if PY_VERSION_HEX >= 0x030C0000
// Since Python-3.12 property-derived types are required to
// have dynamic attributes (to set `__doc__`)
enable_dynamic_attributes(heap_type);
# endif
if (PyType_Ready(type) < 0) {
pybind11_fail("make_static_property_type(): failure in PyType_Ready()!");
}
setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
return type;
}
#else // PYPY
/** PyPy has some issues with the above C API, so we evaluate Python code instead.
This function will only be called once so performance isn't really a concern.
Return value: New reference. */
inline PyTypeObject *make_static_property_type() {
auto d = dict();
PyObject *result = PyRun_String(R"(\
class pybind11_static_property(property):
def __get__(self, obj, cls):
return property.__get__(self, cls, cls)
def __set__(self, obj, value):
cls = obj if isinstance(obj, type) else type(obj)
property.__set__(self, cls, value)
)",
Py_file_input,
d.ptr(),
d.ptr());
if (result == nullptr)
throw error_already_set();
Py_DECREF(result);
return (PyTypeObject *) d["pybind11_static_property"].cast<object>().release().ptr();
}
#endif // PYPY
/** Types with static properties need to handle `Type.static_prop = x` in a specific way.
By default, Python replaces the `static_property` itself, but for wrapped C++ types
we need to call `static_property.__set__()` in order to propagate the new value to
the underlying C++ data structure. */
extern "C" inline int pybind11_meta_setattro(PyObject *obj, PyObject *name, PyObject *value) {
// Use `_PyType_Lookup()` instead of `PyObject_GetAttr()` in order to get the raw
// descriptor (`property`) instead of calling `tp_descr_get` (`property.__get__()`).
PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);
// The following assignment combinations are possible:
// 1. `Type.static_prop = value` --> descr_set: `Type.static_prop.__set__(value)`
// 2. `Type.static_prop = other_static_prop` --> setattro: replace existing `static_prop`
// 3. `Type.regular_attribute = value` --> setattro: regular attribute assignment
auto *const static_prop = (PyObject *) get_internals().static_property_type;
const auto call_descr_set = (descr != nullptr) && (value != nullptr)
&& (PyObject_IsInstance(descr, static_prop) != 0)
&& (PyObject_IsInstance(value, static_prop) == 0);
if (call_descr_set) {
// Call `static_property.__set__()` instead of replacing the `static_property`.
#if !defined(PYPY_VERSION)
return Py_TYPE(descr)->tp_descr_set(descr, obj, value);
#else
if (PyObject *result = PyObject_CallMethod(descr, "__set__", "OO", obj, value)) {
Py_DECREF(result);
return 0;
} else {
return -1;
}
#endif
} else {
// Replace existing attribute.
return PyType_Type.tp_setattro(obj, name, value);
}
}
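/* Usage sketch (illustrative only; `Cfg` is a hypothetical type with a
   static data member):

       py::class_<Cfg>(m, "Cfg").def_readwrite_static("level", &Cfg::level);
       // Python: `Cfg.level = 3` takes branch 1 above, calling
       // static_property.__set__ so the underlying C++ static is updated.
*/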
/**
* Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing
* methods via cls.attr("m2") = cls.attr("m1"): instead the tp_descr_get returns a plain function,
* when called on a class, or a PyMethod, when called on an instance. Override that behaviour here
* to do a special case bypass for PyInstanceMethod_Types.
*/
extern "C" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name) {
PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);
if (descr && PyInstanceMethod_Check(descr)) {
Py_INCREF(descr);
return descr;
}
return PyType_Type.tp_getattro(obj, name);
}
/// metaclass `__call__` function that is used to create all pybind11 objects.
extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, PyObject *kwargs) {
// use the default metaclass call to create/initialize the object
PyObject *self = PyType_Type.tp_call(type, args, kwargs);
if (self == nullptr) {
return nullptr;
}
// Ensure that the base __init__ function(s) were called
values_and_holders vhs(self);
for (const auto &vh : vhs) {
if (!vh.holder_constructed() && !vhs.is_redundant_value_and_holder(vh)) {
PyErr_Format(PyExc_TypeError,
"%.200s.__init__() must be called when overriding __init__",
get_fully_qualified_tp_name(vh.type->type).c_str());
Py_DECREF(self);
return nullptr;
}
}
return self;
}
/// Cleanup the type-info for a pybind11-registered type.
extern "C" inline void pybind11_meta_dealloc(PyObject *obj) {
with_internals([obj](internals &internals) {
auto *type = (PyTypeObject *) obj;
// A pybind11-registered type will:
// 1) be found in internals.registered_types_py
// 2) have exactly one associated `detail::type_info`
auto found_type = internals.registered_types_py.find(type);
if (found_type != internals.registered_types_py.end() && found_type->second.size() == 1
&& found_type->second[0]->type == type) {
auto *tinfo = found_type->second[0];
auto tindex = std::type_index(*tinfo->cpptype);
internals.direct_conversions.erase(tindex);
if (tinfo->module_local) {
get_local_internals().registered_types_cpp.erase(tindex);
} else {
internals.registered_types_cpp.erase(tindex);
}
internals.registered_types_py.erase(tinfo->type);
// Actually just `std::erase_if`, but that's only available in C++20
auto &cache = internals.inactive_override_cache;
for (auto it = cache.begin(), last = cache.end(); it != last;) {
if (it->first == (PyObject *) tinfo->type) {
it = cache.erase(it);
} else {
++it;
}
}
delete tinfo;
}
});
PyType_Type.tp_dealloc(obj);
}
/** This metaclass is assigned by default to all pybind11 types and is required in order
for static properties to function correctly. Users may override this using `py::metaclass`.
Return value: New reference. */
inline PyTypeObject *make_default_metaclass() {
constexpr auto *name = "pybind11_type";
auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));
/* Danger zone: from now (and until PyType_Ready), make sure to
issue no Python C API calls which could potentially invoke the
garbage collector (the GC will call type_traverse(), which will in
turn find the newly constructed type in an invalid state) */
auto *heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);
if (!heap_type) {
pybind11_fail("make_default_metaclass(): error allocating metaclass!");
}
heap_type->ht_name = name_obj.inc_ref().ptr();
#ifdef PYBIND11_BUILTIN_QUALNAME
heap_type->ht_qualname = name_obj.inc_ref().ptr();
#endif
auto *type = &heap_type->ht_type;
type->tp_name = name;
type->tp_base = type_incref(&PyType_Type);
type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
type->tp_call = pybind11_meta_call;
type->tp_setattro = pybind11_meta_setattro;
type->tp_getattro = pybind11_meta_getattro;
type->tp_dealloc = pybind11_meta_dealloc;
if (PyType_Ready(type) < 0) {
pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!");
}
setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
return type;
}
/// For multiple inheritance types we need to recursively register/deregister base pointers for any
/// base classes with pointers that are different from the instance value pointer so that we can
/// correctly recognize an offset base class pointer. This calls a function with any offset base
/// ptrs.
inline void traverse_offset_bases(void *valueptr,
const detail::type_info *tinfo,
instance *self,
bool (*f)(void * /*parentptr*/, instance * /*self*/)) {
for (handle h : reinterpret_borrow<tuple>(tinfo->type->tp_bases)) {
if (auto *parent_tinfo = get_type_info((PyTypeObject *) h.ptr())) {
for (auto &c : parent_tinfo->implicit_casts) {
if (c.first == tinfo->cpptype) {
auto *parentptr = c.second(valueptr);
if (parentptr != valueptr) {
f(parentptr, self);
}
traverse_offset_bases(parentptr, parent_tinfo, self, f);
break;
}
}
}
}
}
inline bool register_instance_impl(void *ptr, instance *self) {
with_instance_map(ptr, [&](instance_map &instances) { instances.emplace(ptr, self); });
return true; // unused, but gives the same signature as the deregister func
}
inline bool deregister_instance_impl(void *ptr, instance *self) {
return with_instance_map(ptr, [&](instance_map &instances) {
auto range = instances.equal_range(ptr);
for (auto it = range.first; it != range.second; ++it) {
if (self == it->second) {
instances.erase(it);
return true;
}
}
return false;
});
}
inline void register_instance(instance *self, void *valptr, const type_info *tinfo) {
register_instance_impl(valptr, self);
if (!tinfo->simple_ancestors) {
traverse_offset_bases(valptr, tinfo, self, register_instance_impl);
}
}
inline bool deregister_instance(instance *self, void *valptr, const type_info *tinfo) {
bool ret = deregister_instance_impl(valptr, self);
if (!tinfo->simple_ancestors) {
traverse_offset_bases(valptr, tinfo, self, deregister_instance_impl);
}
return ret;
}
/// Instance creation function for all pybind11 types. It allocates the internal instance layout
/// for holding C++ objects and holders. Allocation is done lazily (the first time the instance is
/// cast to a reference or pointer), and initialization is done by an `__init__` function.
inline PyObject *make_new_instance(PyTypeObject *type) {
#if defined(PYPY_VERSION)
// PyPy gets tp_basicsize wrong (issue 2482) under multiple inheritance when the first
// inherited object is a plain Python type (i.e. not derived from an extension type). Fix it.
ssize_t instance_size = static_cast<ssize_t>(sizeof(instance));
if (type->tp_basicsize < instance_size) {
type->tp_basicsize = instance_size;
}
#endif
PyObject *self = type->tp_alloc(type, 0);
auto *inst = reinterpret_cast<instance *>(self);
// Allocate the value/holder internals:
inst->allocate_layout();
return self;
}
/// Instance creation function for all pybind11 types. It only allocates space for the
/// C++ object, but doesn't call the constructor -- an `__init__` function must do that.
extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *) {
return make_new_instance(type);
}
/// An `__init__` function constructs the C++ object. Users should provide at least one
/// of these using `py::init` or directly with `.def("__init__", ...)`. Otherwise, the
/// following default function will be used which simply throws an exception.
extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) {
PyTypeObject *type = Py_TYPE(self);
std::string msg = get_fully_qualified_tp_name(type) + ": No constructor defined!";
set_error(PyExc_TypeError, msg.c_str());
return -1;
}
inline void add_patient(PyObject *nurse, PyObject *patient) {
auto *instance = reinterpret_cast<detail::instance *>(nurse);
instance->has_patients = true;
Py_INCREF(patient);
with_internals([&](internals &internals) { internals.patients[nurse].push_back(patient); });
}
inline void clear_patients(PyObject *self) {
auto *instance = reinterpret_cast<detail::instance *>(self);
std::vector<PyObject *> patients;
with_internals([&](internals &internals) {
auto pos = internals.patients.find(self);
if (pos == internals.patients.end()) {
pybind11_fail(
"FATAL: Internal consistency check failed: Invalid clear_patients() call.");
}
// Clearing the patients can cause more Python code to run, which
// can invalidate the iterator. Extract the vector of patients
// from the unordered_map first.
patients = std::move(pos->second);
internals.patients.erase(pos);
});
instance->has_patients = false;
for (PyObject *&patient : patients) {
Py_CLEAR(patient);
}
}
/// Clears all internal data from the instance and removes it from registered instances in
/// preparation for deallocation.
inline void clear_instance(PyObject *self) {
auto *instance = reinterpret_cast<detail::instance *>(self);
// Deallocate any values/holders, if present:
for (auto &v_h : values_and_holders(instance)) {
if (v_h) {
// We have to deregister before we call dealloc because, for virtual MI types, we still
// need to be able to get the parent pointers.
if (v_h.instance_registered()
&& !deregister_instance(instance, v_h.value_ptr(), v_h.type)) {
pybind11_fail(
"pybind11_object_dealloc(): Tried to deallocate unregistered instance!");
}
if (instance->owned || v_h.holder_constructed()) {
v_h.type->dealloc(v_h);
}
}
}
// Deallocate the value/holder layout internals:
instance->deallocate_layout();
if (instance->weakrefs) {
PyObject_ClearWeakRefs(self);
}
PyObject **dict_ptr = _PyObject_GetDictPtr(self);
if (dict_ptr) {
Py_CLEAR(*dict_ptr);
}
if (instance->has_patients) {
clear_patients(self);
}
}
/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc`
/// to destroy the C++ object itself, while the rest is Python bookkeeping.
extern "C" inline void pybind11_object_dealloc(PyObject *self) {
auto *type = Py_TYPE(self);
// If this is a GC tracked object, untrack it first
// Note that the track call is implicitly done by the
// default tp_alloc, which we never override.
if (PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC) != 0) {
PyObject_GC_UnTrack(self);
}
clear_instance(self);
type->tp_free(self);
#if PY_VERSION_HEX < 0x03080000
// `type->tp_dealloc != pybind11_object_dealloc` means that we're being called
// as part of a derived type's dealloc, in which case we're not allowed to decref
// the type here. For cross-module compatibility, we shouldn't compare directly
// with `pybind11_object_dealloc`, but with the common one stashed in internals.
auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base;
if (type->tp_dealloc == pybind11_object_type->tp_dealloc)
Py_DECREF(type);
#else
// This was not needed before Python 3.8 (Python issue 35810)
// https://github.com/pybind/pybind11/issues/1946
Py_DECREF(type);
#endif
}
std::string error_string();
/** Create the type which can be used as a common base for all classes. This is
needed in order to satisfy Python's requirements for multiple inheritance.
Return value: New reference. */
inline PyObject *make_object_base_type(PyTypeObject *metaclass) {
constexpr auto *name = "pybind11_object";
auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));
/* Danger zone: from now (and until PyType_Ready), make sure to
issue no Python C API calls which could potentially invoke the
garbage collector (the GC will call type_traverse(), which will in
turn find the newly constructed type in an invalid state) */
auto *heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);
if (!heap_type) {
pybind11_fail("make_object_base_type(): error allocating type!");
}
heap_type->ht_name = name_obj.inc_ref().ptr();
#ifdef PYBIND11_BUILTIN_QUALNAME
heap_type->ht_qualname = name_obj.inc_ref().ptr();
#endif
auto *type = &heap_type->ht_type;
type->tp_name = name;
type->tp_base = type_incref(&PyBaseObject_Type);
type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));
type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
type->tp_new = pybind11_object_new;
type->tp_init = pybind11_object_init;
type->tp_dealloc = pybind11_object_dealloc;
/* Support weak references (needed for the keep_alive feature) */
type->tp_weaklistoffset = offsetof(instance, weakrefs);
if (PyType_Ready(type) < 0) {
pybind11_fail("PyType_Ready failed in make_object_base_type(): " + error_string());
}
setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));
return (PyObject *) heap_type;
}
/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`.
extern "C" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) {
#if PY_VERSION_HEX >= 0x030D0000
PyObject_VisitManagedDict(self, visit, arg);
#else
PyObject *&dict = *_PyObject_GetDictPtr(self);
Py_VISIT(dict);
#endif
// https://docs.python.org/3/c-api/typeobj.html#c.PyTypeObject.tp_traverse
#if PY_VERSION_HEX >= 0x03090000
Py_VISIT(Py_TYPE(self));
#endif
return 0;
}
/// dynamic_attr: Allow the GC to clear the dictionary.
extern "C" inline int pybind11_clear(PyObject *self) {
#if PY_VERSION_HEX >= 0x030D0000
PyObject_ClearManagedDict(self);
#else
PyObject *&dict = *_PyObject_GetDictPtr(self);
Py_CLEAR(dict);
#endif
return 0;
}
/// Give instances of this type a `__dict__` and opt into garbage collection.
inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) {
auto *type = &heap_type->ht_type;
type->tp_flags |= Py_TPFLAGS_HAVE_GC;
#if PY_VERSION_HEX < 0x030B0000
type->tp_dictoffset = type->tp_basicsize; // place dict at the end
type->tp_basicsize += (ssize_t) sizeof(PyObject *); // and allocate enough space for it
#else
type->tp_flags |= Py_TPFLAGS_MANAGED_DICT;
#endif
type->tp_traverse = pybind11_traverse;
type->tp_clear = pybind11_clear;
static PyGetSetDef getset[]
= {{"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict, nullptr, nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr}};
type->tp_getset = getset;
}
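/* Usage sketch (illustrative only): users opt into this via
   py::dynamic_attr(), assuming a hypothetical `Pet` type:

       py::class_<Pet>(m, "Pet", py::dynamic_attr()).def(py::init<>());
       // Python: p = Pet(); p.nickname = "Rex"  (stored in the instance dict)
*/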
/// buffer_protocol: Fill in the view as specified by flags.
extern "C" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int flags) {
// Look for a `get_buffer` implementation in this type's info or any bases (following MRO).
type_info *tinfo = nullptr;
for (auto type : reinterpret_borrow<tuple>(Py_TYPE(obj)->tp_mro)) {
tinfo = get_type_info((PyTypeObject *) type.ptr());
if (tinfo && tinfo->get_buffer) {
break;
}
}
if (view == nullptr || !tinfo || !tinfo->get_buffer) {
if (view) {
view->obj = nullptr;
}
set_error(PyExc_BufferError, "pybind11_getbuffer(): Internal error");
return -1;
}
std::memset(view, 0, sizeof(Py_buffer));
buffer_info *info = nullptr;
try {
info = tinfo->get_buffer(obj, tinfo->get_buffer_data);
} catch (...) {
try_translate_exceptions();
raise_from(PyExc_BufferError, "Error getting buffer");
return -1;
}
if (info == nullptr) {
pybind11_fail("FATAL UNEXPECTED SITUATION: tinfo->get_buffer() returned nullptr.");
}
if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) {
delete info;
// view->obj = nullptr; // Was just memset to 0, so not necessary
set_error(PyExc_BufferError, "Writable buffer requested for readonly storage");
return -1;
}
view->obj = obj;
view->ndim = 1;
view->internal = info;
view->buf = info->ptr;
view->itemsize = info->itemsize;
view->len = view->itemsize;
for (auto s : info->shape) {
view->len *= s;
}
view->readonly = static_cast<int>(info->readonly);
if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) {
view->format = const_cast<char *>(info->format.c_str());
}
if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) {
view->ndim = (int) info->ndim;
view->strides = info->strides.data();
view->shape = info->shape.data();
}
Py_INCREF(view->obj);
return 0;
}
/// buffer_protocol: Release the resources of the buffer.
extern "C" inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) {
delete (buffer_info *) view->internal;
}
/// Give this type a buffer interface.
inline void enable_buffer_protocol(PyHeapTypeObject *heap_type) {
heap_type->ht_type.tp_as_buffer = &heap_type->as_buffer;
heap_type->as_buffer.bf_getbuffer = pybind11_getbuffer;
heap_type->as_buffer.bf_releasebuffer = pybind11_releasebuffer;
}
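/* Usage sketch (illustrative only; `Matrix` and its data()/rows()/cols()
   members are hypothetical): enabled via py::buffer_protocol() plus a
   def_buffer that describes the memory:

       py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
           .def_buffer([](Matrix &mat) -> py::buffer_info {
               return py::buffer_info(
                   mat.data(), sizeof(float),
                   py::format_descriptor<float>::format(),
                   2,                                            // dimensions
                   {mat.rows(), mat.cols()},                     // shape
                   {sizeof(float) * mat.cols(), sizeof(float)}); // strides
           });
*/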
/** Create a brand new Python type according to the `type_record` specification.
Return value: New reference. */
inline PyObject *make_new_python_type(const type_record &rec) {
auto name = reinterpret_steal<object>(PYBIND11_FROM_STRING(rec.name));
auto qualname = name;
if (rec.scope && !PyModule_Check(rec.scope.ptr()) && hasattr(rec.scope, "__qualname__")) {
qualname = reinterpret_steal<object>(
PyUnicode_FromFormat("%U.%U", rec.scope.attr("__qualname__").ptr(), name.ptr()));
}
object module_;
if (rec.scope) {
if (hasattr(rec.scope, "__module__")) {
module_ = rec.scope.attr("__module__");
} else if (hasattr(rec.scope, "__name__")) {
module_ = rec.scope.attr("__name__");
}
}
const auto *full_name = c_str(
#if !defined(PYPY_VERSION)
module_ ? str(module_).cast<std::string>() + "." + rec.name :
#endif
rec.name);
char *tp_doc = nullptr;
if (rec.doc && options::show_user_defined_docstrings()) {
/* Allocate memory for docstring (Python will free this later on) */
size_t size = std::strlen(rec.doc) + 1;
#if PY_VERSION_HEX >= 0x030D0000
tp_doc = (char *) PyMem_MALLOC(size);
#else
tp_doc = (char *) PyObject_MALLOC(size);
#endif
std::memcpy((void *) tp_doc, rec.doc, size);
}
auto &internals = get_internals();
auto bases = tuple(rec.bases);
auto *base = (bases.empty()) ? internals.instance_base : bases[0].ptr();
/* Danger zone: from now (and until PyType_Ready), make sure to
issue no Python C API calls which could potentially invoke the
garbage collector (the GC will call type_traverse(), which will in
turn find the newly constructed type in an invalid state) */
auto *metaclass
= rec.metaclass.ptr() ? (PyTypeObject *) rec.metaclass.ptr() : internals.default_metaclass;
auto *heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);
if (!heap_type) {
pybind11_fail(std::string(rec.name) + ": Unable to create type object!");
}
heap_type->ht_name = name.release().ptr();
#ifdef PYBIND11_BUILTIN_QUALNAME
heap_type->ht_qualname = qualname.inc_ref().ptr();
#endif
auto *type = &heap_type->ht_type;
type->tp_name = full_name;
type->tp_doc = tp_doc;
type->tp_base = type_incref((PyTypeObject *) base);
type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));
if (!bases.empty()) {
type->tp_bases = bases.release().ptr();
}
/* Don't inherit base __init__ */
type->tp_init = pybind11_object_init;
/* Supported protocols */
type->tp_as_number = &heap_type->as_number;
type->tp_as_sequence = &heap_type->as_sequence;
type->tp_as_mapping = &heap_type->as_mapping;
type->tp_as_async = &heap_type->as_async;
/* Flags */
type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE;
if (!rec.is_final) {
type->tp_flags |= Py_TPFLAGS_BASETYPE;
}
if (rec.dynamic_attr) {
enable_dynamic_attributes(heap_type);
}
if (rec.buffer_protocol) {
enable_buffer_protocol(heap_type);
}
if (rec.custom_type_setup_callback) {
rec.custom_type_setup_callback(heap_type);
}
if (PyType_Ready(type) < 0) {
pybind11_fail(std::string(rec.name) + ": PyType_Ready failed: " + error_string());
}
assert(!rec.dynamic_attr || PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));
/* Register type with the parent scope */
if (rec.scope) {
setattr(rec.scope, rec.name, (PyObject *) type);
} else {
Py_INCREF(type); // Keep it alive forever (reference leak)
}
if (module_) { // Needed by pydoc
setattr((PyObject *) type, "__module__", module_);
}
PYBIND11_SET_OLDPY_QUALNAME(type, qualname);
return (PyObject *) type;
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
==============================================================================================================================
SOURCE CODE FILE: common.h
LINES: 1
SIZE: 54.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\common.h
ENCODING: utf-8
```h
/*
pybind11/detail/common.h -- Basic macros
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#define PYBIND11_VERSION_MAJOR 2
#define PYBIND11_VERSION_MINOR 13
#define PYBIND11_VERSION_PATCH 6
// Similar to Python's convention: https://docs.python.org/3/c-api/apiabiversion.html
// Additional convention: 0xD = dev
#define PYBIND11_VERSION_HEX 0x020D0600
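// For example, 0x020D0600 decodes as major 0x02 (2), minor 0x0D (13),
// patch 0x06 (6), with 0x00 for the release level/serial (a development
// release would carry 0xD in the level nibble, per the convention above).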
// Define some generic pybind11 helper macros for warning management.
//
// Note that compiler-specific push/pop pairs are baked into the
// PYBIND11_NAMESPACE_BEGIN/PYBIND11_NAMESPACE_END pair of macros. Therefore manual
// PYBIND11_WARNING_PUSH/PYBIND11_WARNING_POP are usually only needed in `#include` sections.
//
// If you find you need to suppress a warning, please try to make the suppression as local as
// possible using these macros. Please also be sure to push/pop with the pybind11 macros. Please
// only use compiler specifics if you need to check specific versions, e.g. Apple Clang vs. vanilla
// Clang.
#if defined(_MSC_VER)
# define PYBIND11_COMPILER_MSVC
# define PYBIND11_PRAGMA(...) __pragma(__VA_ARGS__)
# define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(warning(push))
# define PYBIND11_WARNING_POP PYBIND11_PRAGMA(warning(pop))
#elif defined(__INTEL_COMPILER)
# define PYBIND11_COMPILER_INTEL
# define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
# define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(warning push)
# define PYBIND11_WARNING_POP PYBIND11_PRAGMA(warning pop)
#elif defined(__clang__)
# define PYBIND11_COMPILER_CLANG
# define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
# define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(clang diagnostic push)
# define PYBIND11_WARNING_POP PYBIND11_PRAGMA(clang diagnostic pop)
#elif defined(__GNUC__)
# define PYBIND11_COMPILER_GCC
# define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
# define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(GCC diagnostic push)
# define PYBIND11_WARNING_POP PYBIND11_PRAGMA(GCC diagnostic pop)
#endif
#ifdef PYBIND11_COMPILER_MSVC
# define PYBIND11_WARNING_DISABLE_MSVC(name) PYBIND11_PRAGMA(warning(disable : name))
#else
# define PYBIND11_WARNING_DISABLE_MSVC(name)
#endif
#ifdef PYBIND11_COMPILER_CLANG
# define PYBIND11_WARNING_DISABLE_CLANG(name) PYBIND11_PRAGMA(clang diagnostic ignored name)
#else
# define PYBIND11_WARNING_DISABLE_CLANG(name)
#endif
#ifdef PYBIND11_COMPILER_GCC
# define PYBIND11_WARNING_DISABLE_GCC(name) PYBIND11_PRAGMA(GCC diagnostic ignored name)
#else
# define PYBIND11_WARNING_DISABLE_GCC(name)
#endif
#ifdef PYBIND11_COMPILER_INTEL
# define PYBIND11_WARNING_DISABLE_INTEL(name) PYBIND11_PRAGMA(warning disable name)
#else
# define PYBIND11_WARNING_DISABLE_INTEL(name)
#endif
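/* Usage sketch (illustrative only): suppressing a warning as locally as
   possible with the helpers above:

       PYBIND11_WARNING_PUSH
       PYBIND11_WARNING_DISABLE_MSVC(4127) // conditional expression is constant
       PYBIND11_WARNING_DISABLE_GCC("-Wattributes")
       // ... code that would otherwise trigger the warning ...
       PYBIND11_WARNING_POP
*/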
#define PYBIND11_NAMESPACE_BEGIN(name) \
namespace name { \
PYBIND11_WARNING_PUSH
#define PYBIND11_NAMESPACE_END(name) \
PYBIND11_WARNING_POP \
}
// Robust support for some features and loading modules compiled against different pybind versions
// requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute
// on the main `pybind11` namespace.
#if !defined(PYBIND11_NAMESPACE)
# ifdef __GNUG__
# define PYBIND11_NAMESPACE pybind11 __attribute__((visibility("hidden")))
# else
# define PYBIND11_NAMESPACE pybind11
# endif
#endif
#if !(defined(_MSC_VER) && __cplusplus == 199711L)
# if __cplusplus >= 201402L
# define PYBIND11_CPP14
# if __cplusplus >= 201703L
# define PYBIND11_CPP17
# if __cplusplus >= 202002L
# define PYBIND11_CPP20
// Please update tests/pybind11_tests.cpp `cpp_std()` when adding a macro here.
# endif
# endif
# endif
#elif defined(_MSC_VER) && __cplusplus == 199711L
// MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully
// implemented). Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3
// or newer.
# if _MSVC_LANG >= 201402L
# define PYBIND11_CPP14
# if _MSVC_LANG > 201402L
# define PYBIND11_CPP17
# if _MSVC_LANG >= 202002L
# define PYBIND11_CPP20
# endif
# endif
# endif
#endif
#if defined(PYBIND11_CPP20)
# define PYBIND11_CONSTINIT constinit
# define PYBIND11_DTOR_CONSTEXPR constexpr
#else
# define PYBIND11_CONSTINIT
# define PYBIND11_DTOR_CONSTEXPR
#endif
// Compiler version assertions
#if defined(__INTEL_COMPILER)
# if __INTEL_COMPILER < 1800
# error pybind11 requires Intel C++ compiler v18 or newer
# elif __INTEL_COMPILER < 1900 && defined(PYBIND11_CPP14)
# error pybind11 supports only C++11 with Intel C++ compiler v18. Use v19 or newer for C++14.
# endif
/* The following pragma cannot be pop'ed:
https://community.intel.com/t5/Intel-C-Compiler/Inline-and-no-inline-warning/td-p/1216764 */
# pragma warning disable 2196 // warning #2196: routine is both "inline" and "noinline"
#elif defined(__clang__) && !defined(__apple_build_version__)
# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3)
# error pybind11 requires clang 3.3 or newer
# endif
#elif defined(__clang__)
// Apple changes clang version macros to its Xcode version; the first Xcode release based on
// (upstream) clang 3.3 was Xcode 5:
# if __clang_major__ < 5
# error pybind11 requires Xcode/clang 5.0 or newer
# endif
#elif defined(__GNUG__)
# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
# error pybind11 requires gcc 4.8 or newer
# endif
#elif defined(_MSC_VER)
# if _MSC_VER < 1910
# error pybind11 2.10+ requires MSVC 2017 or newer
# endif
#endif
#if !defined(PYBIND11_EXPORT)
# if defined(WIN32) || defined(_WIN32)
# define PYBIND11_EXPORT __declspec(dllexport)
# else
# define PYBIND11_EXPORT __attribute__((visibility("default")))
# endif
#endif
#if !defined(PYBIND11_EXPORT_EXCEPTION)
# if defined(__apple_build_version__)
# define PYBIND11_EXPORT_EXCEPTION PYBIND11_EXPORT
# else
# define PYBIND11_EXPORT_EXCEPTION
# endif
#endif
// For CUDA, GCC7, GCC8:
// PYBIND11_NOINLINE_FORCED is incompatible with `-Wattributes -Werror`.
// When defining PYBIND11_NOINLINE_FORCED, it is best to also use `-Wno-attributes`.
// However, the measured shared-library size savings when using noinline are only
// 1.7% for CUDA, -0.2% for GCC7, and 0.0% for GCC8 (using -DCMAKE_BUILD_TYPE=MinSizeRel,
// the default under pybind11/tests).
#if !defined(PYBIND11_NOINLINE_FORCED) \
&& (defined(__CUDACC__) || (defined(__GNUC__) && (__GNUC__ == 7 || __GNUC__ == 8)))
# define PYBIND11_NOINLINE_DISABLED
#endif
// The PYBIND11_NOINLINE macro is for function DEFINITIONS.
// In contrast, FORWARD DECLARATIONS should never use this macro:
// https://stackoverflow.com/questions/9317473/forward-declaration-of-inline-functions
#if defined(PYBIND11_NOINLINE_DISABLED) // Option for maximum portability and experimentation.
# define PYBIND11_NOINLINE inline
#elif defined(_MSC_VER)
# define PYBIND11_NOINLINE __declspec(noinline) inline
#else
# define PYBIND11_NOINLINE __attribute__((noinline)) inline
#endif
#if defined(__MINGW32__)
// For unknown reasons, all PYBIND11_DEPRECATED members trigger a warning when declared,
// whether they are used or not
# define PYBIND11_DEPRECATED(reason)
#elif defined(PYBIND11_CPP14)
# define PYBIND11_DEPRECATED(reason) [[deprecated(reason)]]
#else
# define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason)))
#endif
#if defined(PYBIND11_CPP17)
# define PYBIND11_MAYBE_UNUSED [[maybe_unused]]
#elif defined(_MSC_VER) && !defined(__clang__)
# define PYBIND11_MAYBE_UNUSED
#else
# define PYBIND11_MAYBE_UNUSED __attribute__((__unused__))
#endif
/* Don't let Python.h #define (v)snprintf as macros because they are implemented
   properly in Visual Studio since 2015. */
#if defined(_MSC_VER)
# define HAVE_SNPRINTF 1
#endif
/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode
#if defined(_MSC_VER)
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4505)
// C4505: 'PySlice_GetIndicesEx': unreferenced local function has been removed (PyPy only)
# if defined(_DEBUG) && !defined(Py_DEBUG)
// Workaround for a VS 2022 issue.
// NOTE: This workaround knowingly violates the Python.h include order requirement:
// https://docs.python.org/3/c-api/intro.html#include-files
// See https://github.com/pybind/pybind11/pull/3497 for full context.
# include <yvals.h>
# if _MSVC_STL_VERSION >= 143
# include <crtdefs.h>
# endif
# define PYBIND11_DEBUG_MARKER
# undef _DEBUG
# endif
#endif
// https://en.cppreference.com/w/c/chrono/localtime
#if defined(__STDC_LIB_EXT1__) && !defined(__STDC_WANT_LIB_EXT1__)
# define __STDC_WANT_LIB_EXT1__
#endif
#ifdef __has_include
// std::optional (but including it in c++14 mode isn't allowed)
# if defined(PYBIND11_CPP17) && __has_include(<optional>)
# define PYBIND11_HAS_OPTIONAL 1
# endif
// std::experimental::optional (but not allowed in c++11 mode)
# if defined(PYBIND11_CPP14) && (__has_include(<experimental/optional>) && \
!__has_include(<optional>))
# define PYBIND11_HAS_EXP_OPTIONAL 1
# endif
// std::variant
# if defined(PYBIND11_CPP17) && __has_include(<variant>)
# define PYBIND11_HAS_VARIANT 1
# endif
#elif defined(_MSC_VER) && defined(PYBIND11_CPP17)
# define PYBIND11_HAS_OPTIONAL 1
# define PYBIND11_HAS_VARIANT 1
#endif
#if defined(PYBIND11_CPP17)
# if defined(__has_include)
# if __has_include(<string_view>)
# define PYBIND11_HAS_STRING_VIEW
# endif
# elif defined(_MSC_VER)
# define PYBIND11_HAS_STRING_VIEW
# endif
#endif
#include <Python.h>
#if PY_VERSION_HEX < 0x03070000
# error "PYTHON < 3.7 IS UNSUPPORTED. pybind11 v2.12 was the last to support Python 3.6."
#endif
#include <frameobject.h>
#include <pythread.h>
/* Python #defines overrides on all sorts of core functions, which
tends to wreak havoc in C++ codebases that expect these to work
like regular functions (potentially with several overloads) */
#if defined(isalnum)
# undef isalnum
# undef isalpha
# undef islower
# undef isspace
# undef isupper
# undef tolower
# undef toupper
#endif
#if defined(copysign)
# undef copysign
#endif
#if defined(PYBIND11_NUMPY_1_ONLY)
# define PYBIND11_INTERNAL_NUMPY_1_ONLY_DETECTED
#endif
#if defined(PYPY_VERSION) && !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
# define PYBIND11_SIMPLE_GIL_MANAGEMENT
#endif
#if defined(_MSC_VER)
# if defined(PYBIND11_DEBUG_MARKER)
# define _DEBUG
# undef PYBIND11_DEBUG_MARKER
# endif
PYBIND11_WARNING_POP
#endif
#include <cstddef>
#include <cstring>
#include <exception>
#include <forward_list>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <typeindex>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#if defined(__has_include)
# if __has_include(<version>)
# include <version>
# endif
#endif
// Must be after including <version> or one of the other headers specified by the standard
#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
# define PYBIND11_HAS_U8STRING
#endif
// See description of PR #4246:
#if !defined(PYBIND11_NO_ASSERT_GIL_HELD_INCREF_DECREF) && !defined(NDEBUG) \
&& !defined(PYPY_VERSION) && !defined(PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF)
# define PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
#endif
// #define PYBIND11_STR_LEGACY_PERMISSIVE
// If DEFINED, pybind11::str can hold PyUnicodeObject or PyBytesObject
// (probably surprising and never documented, but this was the
// legacy behavior until and including v2.6.x). As a side-effect,
// pybind11::isinstance<str>() is true for both pybind11::str and
// pybind11::bytes.
// If UNDEFINED, pybind11::str can only hold PyUnicodeObject, and
// pybind11::isinstance<str>() is true only for pybind11::str.
// However, for Python 2 only (!), the pybind11::str caster
// implicitly decoded bytes to PyUnicodeObject. This was to ease
// the transition from the legacy behavior to the non-permissive
// behavior.
/// Compatibility macros for Python 2 / Python 3 versions TODO: remove
#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr)
#define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check
#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION
#define PYBIND11_BYTES_CHECK PyBytes_Check
#define PYBIND11_BYTES_FROM_STRING PyBytes_FromString
#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize
#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize
#define PYBIND11_BYTES_AS_STRING PyBytes_AsString
#define PYBIND11_BYTES_SIZE PyBytes_Size
#define PYBIND11_LONG_CHECK(o) PyLong_Check(o)
#define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o)
#define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) (o))
#define PYBIND11_LONG_FROM_UNSIGNED(o) PyLong_FromSize_t((size_t) (o))
#define PYBIND11_BYTES_NAME "bytes"
#define PYBIND11_STRING_NAME "str"
#define PYBIND11_SLICE_OBJECT PyObject
#define PYBIND11_FROM_STRING PyUnicode_FromString
#define PYBIND11_STR_TYPE ::pybind11::str
#define PYBIND11_BOOL_ATTR "__bool__"
#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)
#define PYBIND11_BUILTINS_MODULE "builtins"
// Providing a separate declaration to make Clang's -Wmissing-prototypes happy.
// See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
#define PYBIND11_PLUGIN_IMPL(name) \
extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name(); \
extern "C" PYBIND11_EXPORT PyObject *PyInit_##name()
#define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code
#define PYBIND11_STRINGIFY(x) #x
#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x)
#define PYBIND11_CONCAT(first, second) first##second
#define PYBIND11_ENSURE_INTERNALS_READY pybind11::detail::get_internals();
#define PYBIND11_CHECK_PYTHON_VERSION \
{ \
const char *compiled_ver \
= PYBIND11_TOSTRING(PY_MAJOR_VERSION) "." PYBIND11_TOSTRING(PY_MINOR_VERSION); \
const char *runtime_ver = Py_GetVersion(); \
size_t len = std::strlen(compiled_ver); \
if (std::strncmp(runtime_ver, compiled_ver, len) != 0 \
|| (runtime_ver[len] >= '0' && runtime_ver[len] <= '9')) { \
PyErr_Format(PyExc_ImportError, \
"Python version mismatch: module was compiled for Python %s, " \
"but the interpreter version is incompatible: %s.", \
compiled_ver, \
runtime_ver); \
return nullptr; \
} \
}
#define PYBIND11_CATCH_INIT_EXCEPTIONS \
catch (pybind11::error_already_set & e) { \
pybind11::raise_from(e, PyExc_ImportError, "initialization failed"); \
return nullptr; \
} \
catch (const std::exception &e) { \
::pybind11::set_error(PyExc_ImportError, e.what()); \
return nullptr; \
}
/** \rst
***Deprecated in favor of PYBIND11_MODULE***
This macro creates the entry point that will be invoked when the Python interpreter
imports a plugin library. Please create a `module_` in the function body and return
the pointer to its underlying Python object at the end.
.. code-block:: cpp
PYBIND11_PLUGIN(example) {
pybind11::module_ m("example", "pybind11 example plugin");
/// Set up bindings here
return m.ptr();
}
\endrst */
#define PYBIND11_PLUGIN(name) \
PYBIND11_DEPRECATED("PYBIND11_PLUGIN is deprecated, use PYBIND11_MODULE") \
static PyObject *pybind11_init(); \
PYBIND11_PLUGIN_IMPL(name) { \
PYBIND11_CHECK_PYTHON_VERSION \
PYBIND11_ENSURE_INTERNALS_READY \
try { \
return pybind11_init(); \
} \
PYBIND11_CATCH_INIT_EXCEPTIONS \
} \
PyObject *pybind11_init()
/** \rst
This macro creates the entry point that will be invoked when the Python interpreter
imports an extension module. The module name is given as the first argument and it
should not be in quotes. The second macro argument defines a variable of type
`py::module_` which can be used to initialize the module.
The entry point is marked as "maybe unused" to aid dead-code detection analysis:
since the entry point is typically only looked up at runtime and not referenced
during translation, it would otherwise appear as unused ("dead") code.
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
m.doc() = "pybind11 example module";
// Add bindings here
m.def("foo", []() {
return "Hello, World!";
});
}
The third macro argument is optional (available since 2.13.0), and can be used to
mark the extension module as safe to run without the GIL under a free-threaded CPython
interpreter. Passing this argument has no effect on other interpreters.
.. code-block:: cpp
PYBIND11_MODULE(example, m, py::mod_gil_not_used()) {
m.doc() = "pybind11 example module safe to run without the GIL";
// Add bindings here
m.def("foo", []() {
return "Hello, Free-threaded World!";
});
}
\endrst */
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_CLANG("-Wgnu-zero-variadic-macro-arguments")
#define PYBIND11_MODULE(name, variable, ...) \
static ::pybind11::module_::module_def PYBIND11_CONCAT(pybind11_module_def_, name) \
PYBIND11_MAYBE_UNUSED; \
PYBIND11_MAYBE_UNUSED \
static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &); \
PYBIND11_PLUGIN_IMPL(name) { \
PYBIND11_CHECK_PYTHON_VERSION \
PYBIND11_ENSURE_INTERNALS_READY \
auto m = ::pybind11::module_::create_extension_module( \
PYBIND11_TOSTRING(name), \
nullptr, \
&PYBIND11_CONCAT(pybind11_module_def_, name), \
##__VA_ARGS__); \
try { \
PYBIND11_CONCAT(pybind11_init_, name)(m); \
return m.ptr(); \
} \
PYBIND11_CATCH_INIT_EXCEPTIONS \
} \
void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ & (variable))
PYBIND11_WARNING_POP
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
using ssize_t = Py_ssize_t;
using size_t = std::size_t;
template <typename IntType>
inline ssize_t ssize_t_cast(const IntType &val) {
static_assert(sizeof(IntType) <= sizeof(ssize_t), "Implicit narrowing is not permitted.");
return static_cast<ssize_t>(val);
}
/// Approach used to cast a previously unknown C++ instance into a Python object
enum class return_value_policy : uint8_t {
/** This is the default return value policy, which falls back to the policy
return_value_policy::take_ownership when the return value is a pointer.
        Otherwise, it uses return_value_policy::move or return_value_policy::copy for rvalue
and lvalue references, respectively. See below for a description of what
all of these different policies do. */
automatic = 0,
/** As above, but use policy return_value_policy::reference when the return
value is a pointer. This is the default conversion policy for function
arguments when calling Python functions manually from C++ code (i.e. via
handle::operator()). You probably won't need to use this. */
automatic_reference,
/** Reference an existing object (i.e. do not create a new copy) and take
ownership. Python will call the destructor and delete operator when the
object's reference count reaches zero. Undefined behavior ensues when
        the C++ side does the same. */
take_ownership,
/** Create a new copy of the returned object, which will be owned by
Python. This policy is comparably safe because the lifetimes of the two
instances are decoupled. */
copy,
/** Use std::move to move the return value contents into a new instance
that will be owned by Python. This policy is comparably safe because the
lifetimes of the two instances (move source and destination) are
decoupled. */
move,
/** Reference an existing object, but do not take ownership. The C++ side
is responsible for managing the object's lifetime and deallocating it
when it is no longer used. Warning: undefined behavior will ensue when
the C++ side deletes an object that is still referenced and used by
Python. */
reference,
/** This policy only applies to methods and properties. It references the
object without taking ownership similar to the above
return_value_policy::reference policy. In contrast to that policy, the
function or property's implicit this argument (called the parent) is
considered to be the owner of the return value (the child).
pybind11 then couples the lifetime of the parent to the child via a
reference relationship that ensures that the parent cannot be garbage
collected while Python is still using the child. More advanced
variations of this scheme are also possible using combinations of
return_value_policy::reference and the keep_alive call policy */
reference_internal
};
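// A usage sketch (illustrative; `Data`, `get_data`, and `m` are hypothetical names):
// the policy is passed as an extra argument to `def()`:
//
//     m.def("get_data", &get_data, return_value_policy::reference);
//
// Here Python borrows the returned `Data *` without taking ownership, so the C++ side
// must keep the object alive for as long as Python uses it.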
PYBIND11_NAMESPACE_BEGIN(detail)
inline static constexpr int log2(size_t n, int k = 0) {
return (n <= 1) ? k : log2(n >> 1, k + 1);
}
// Returns the size as a multiple of sizeof(void *), rounded up.
inline static constexpr size_t size_in_ptrs(size_t s) {
return 1 + ((s - 1) >> log2(sizeof(void *)));
}
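// For example, with 8-byte pointers: size_in_ptrs(1) == 1, size_in_ptrs(8) == 1, and
// size_in_ptrs(9) == 2 (i.e. 9 bytes round up to 16 bytes of storage).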
/**
 * The space to allocate for simple layout instance holders (see below) in multiples of the size
 * of a pointer (e.g. 2 means 16 bytes on 64-bit architectures). The default is the minimum
 * required to hold either a std::unique_ptr or std::shared_ptr (which is almost always
* sizeof(std::shared_ptr<T>)).
*/
constexpr size_t instance_simple_holder_in_ptrs() {
static_assert(sizeof(std::shared_ptr<int>) >= sizeof(std::unique_ptr<int>),
"pybind assumes std::shared_ptrs are at least as big as std::unique_ptrs");
return size_in_ptrs(sizeof(std::shared_ptr<int>));
}
// Forward declarations
struct type_info;
struct value_and_holder;
struct nonsimple_values_and_holders {
void **values_and_holders;
uint8_t *status;
};
/// The 'instance' type which needs to be standard layout (need to be able to use 'offsetof')
struct instance {
PyObject_HEAD
/// Storage for pointers and holder; see simple_layout, below, for a description
union {
void *simple_value_holder[1 + instance_simple_holder_in_ptrs()];
nonsimple_values_and_holders nonsimple;
};
/// Weak references
PyObject *weakrefs;
/// If true, the pointer is owned which means we're free to manage it with a holder.
bool owned : 1;
/**
* An instance has two possible value/holder layouts.
*
* Simple layout (when this flag is true), means the `simple_value_holder` is set with a
* pointer and the holder object governing that pointer, i.e. [val1*][holder]. This layout is
* applied whenever there is no python-side multiple inheritance of bound C++ types *and* the
* type's holder will fit in the default space (which is large enough to hold either a
* std::unique_ptr or std::shared_ptr).
*
* Non-simple layout applies when using custom holders that require more space than
* `shared_ptr` (which is typically the size of two pointers), or when multiple inheritance is
* used on the python side. Non-simple layout allocates the required amount of memory to have
* multiple bound C++ classes as parents. Under this layout, `nonsimple.values_and_holders` is
 * set to a pointer to allocated space of the required size to hold a sequence of value
 * pointers and holders followed by `status`, a set of bit flags (1 byte each), i.e.
* [val1*][holder1][val2*][holder2]...[bb...] where each [block] is rounded up to a multiple
* of `sizeof(void *)`. `nonsimple.status` is, for convenience, a pointer to the beginning of
* the [bb...] block (but not independently allocated).
*
* Status bits indicate whether the associated holder is constructed (&
* status_holder_constructed) and whether the value pointer is registered (&
* status_instance_registered) in `registered_instances`.
*/
bool simple_layout : 1;
/// For simple layout, tracks whether the holder has been constructed
bool simple_holder_constructed : 1;
/// For simple layout, tracks whether the instance is registered in `registered_instances`
bool simple_instance_registered : 1;
/// If true, get_internals().patients has an entry for this object
bool has_patients : 1;
/// Initializes all of the above type/values/holders data (but not the instance values
/// themselves)
void allocate_layout();
/// Destroys/deallocates all of the above
void deallocate_layout();
/// Returns the value_and_holder wrapper for the given type (or the first, if `find_type`
/// omitted). Returns a default-constructed (with `.inst = nullptr`) object on failure if
/// `throw_if_missing` is false.
value_and_holder get_value_and_holder(const type_info *find_type = nullptr,
bool throw_if_missing = true);
/// Bit values for the non-simple status flags
static constexpr uint8_t status_holder_constructed = 1;
static constexpr uint8_t status_instance_registered = 2;
};
static_assert(std::is_standard_layout<instance>::value,
"Internal error: `pybind11::detail::instance` is not standard layout!");
/// from __cpp_future__ import (convenient aliases from C++14/17)
#if defined(PYBIND11_CPP14)
using std::conditional_t;
using std::enable_if_t;
using std::remove_cv_t;
using std::remove_reference_t;
#else
template <bool B, typename T = void>
using enable_if_t = typename std::enable_if<B, T>::type;
template <bool B, typename T, typename F>
using conditional_t = typename std::conditional<B, T, F>::type;
template <typename T>
using remove_cv_t = typename std::remove_cv<T>::type;
template <typename T>
using remove_reference_t = typename std::remove_reference<T>::type;
#endif
#if defined(PYBIND11_CPP20)
using std::remove_cvref;
using std::remove_cvref_t;
#else
template <class T>
struct remove_cvref {
using type = remove_cv_t<remove_reference_t<T>>;
};
template <class T>
using remove_cvref_t = typename remove_cvref<T>::type;
#endif
/// Example usage: is_same_ignoring_cvref<T, PyObject *>::value
template <typename T, typename U>
using is_same_ignoring_cvref = std::is_same<detail::remove_cvref_t<T>, U>;
/// Index sequences
#if defined(PYBIND11_CPP14)
using std::index_sequence;
using std::make_index_sequence;
#else
template <size_t...>
struct index_sequence {};
template <size_t N, size_t... S>
struct make_index_sequence_impl : make_index_sequence_impl<N - 1, N - 1, S...> {};
template <size_t... S>
struct make_index_sequence_impl<0, S...> {
using type = index_sequence<S...>;
};
template <size_t N>
using make_index_sequence = typename make_index_sequence_impl<N>::type;
#endif
/// Make an index sequence of the indices of true arguments
template <typename ISeq, size_t, bool...>
struct select_indices_impl {
using type = ISeq;
};
template <size_t... IPrev, size_t I, bool B, bool... Bs>
struct select_indices_impl<index_sequence<IPrev...>, I, B, Bs...>
: select_indices_impl<conditional_t<B, index_sequence<IPrev..., I>, index_sequence<IPrev...>>,
I + 1,
Bs...> {};
template <bool... Bs>
using select_indices = typename select_indices_impl<index_sequence<>, 0, Bs...>::type;
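// For example, select_indices<true, false, true> is index_sequence<0, 2>.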
/// Backports of std::bool_constant and std::negation to accommodate older compilers
template <bool B>
using bool_constant = std::integral_constant<bool, B>;
template <typename T>
struct negation : bool_constant<!T::value> {};
// PGI/Intel cannot detect operator delete with the "compatible" void_t impl, so
// using the new one (C++14 defect, so generally works on newer compilers, even
// if not in C++17 mode)
#if defined(__PGIC__) || defined(__INTEL_COMPILER)
template <typename...>
using void_t = void;
#else
template <typename...>
struct void_t_impl {
using type = void;
};
template <typename... Ts>
using void_t = typename void_t_impl<Ts...>::type;
#endif
/// Compile-time all/any/none of that check the boolean value of all template types
#if defined(__cpp_fold_expressions) && !(defined(_MSC_VER) && (_MSC_VER < 1916))
template <class... Ts>
using all_of = bool_constant<(Ts::value && ...)>;
template <class... Ts>
using any_of = bool_constant<(Ts::value || ...)>;
#elif !defined(_MSC_VER)
template <bool...>
struct bools {};
template <class... Ts>
using all_of = std::is_same<bools<Ts::value..., true>, bools<true, Ts::value...>>;
template <class... Ts>
using any_of = negation<all_of<negation<Ts>...>>;
#else
// MSVC has trouble with the above, but supports std::conjunction, which we can use instead (albeit
// at a slight loss of compilation efficiency).
template <class... Ts>
using all_of = std::conjunction<Ts...>;
template <class... Ts>
using any_of = std::disjunction<Ts...>;
#endif
template <class... Ts>
using none_of = negation<any_of<Ts...>>;
template <class T, template <class> class... Predicates>
using satisfies_all_of = all_of<Predicates<T>...>;
template <class T, template <class> class... Predicates>
using satisfies_any_of = any_of<Predicates<T>...>;
template <class T, template <class> class... Predicates>
using satisfies_none_of = none_of<Predicates<T>...>;
/// Strip the class from a method type
template <typename T>
struct remove_class {};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...)> {
using type = R(A...);
};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) const> {
using type = R(A...);
};
#ifdef __cpp_noexcept_function_type
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) noexcept> {
using type = R(A...);
};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) const noexcept> {
using type = R(A...);
};
#endif
/// Helper template to strip away type modifiers
template <typename T>
struct intrinsic_type {
using type = T;
};
template <typename T>
struct intrinsic_type<const T> {
using type = typename intrinsic_type<T>::type;
};
template <typename T>
struct intrinsic_type<T *> {
using type = typename intrinsic_type<T>::type;
};
template <typename T>
struct intrinsic_type<T &> {
using type = typename intrinsic_type<T>::type;
};
template <typename T>
struct intrinsic_type<T &&> {
using type = typename intrinsic_type<T>::type;
};
template <typename T, size_t N>
struct intrinsic_type<const T[N]> {
using type = typename intrinsic_type<T>::type;
};
template <typename T, size_t N>
struct intrinsic_type<T[N]> {
using type = typename intrinsic_type<T>::type;
};
template <typename T>
using intrinsic_t = typename intrinsic_type<T>::type;
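// For example, intrinsic_t<const int &>, intrinsic_t<int *>, and intrinsic_t<int[4]>
// are all `int`.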
/// Helper type to replace 'void' in some expressions
struct void_type {};
/// Helper template which holds a list of types
template <typename...>
struct type_list {};
/// Compile-time integer sum
#ifdef __cpp_fold_expressions
template <typename... Ts>
constexpr size_t constexpr_sum(Ts... ns) {
return (0 + ... + size_t{ns});
}
#else
constexpr size_t constexpr_sum() { return 0; }
template <typename T, typename... Ts>
constexpr size_t constexpr_sum(T n, Ts... ns) {
return size_t{n} + constexpr_sum(ns...);
}
#endif
PYBIND11_NAMESPACE_BEGIN(constexpr_impl)
/// Implementation details for constexpr functions
constexpr int first(int i) { return i; }
template <typename T, typename... Ts>
constexpr int first(int i, T v, Ts... vs) {
return v ? i : first(i + 1, vs...);
}
constexpr int last(int /*i*/, int result) { return result; }
template <typename T, typename... Ts>
constexpr int last(int i, int result, T v, Ts... vs) {
return last(i + 1, v ? i : result, vs...);
}
PYBIND11_NAMESPACE_END(constexpr_impl)
/// Return the index of the first type in Ts which satisfies Predicate<T>.
/// Returns sizeof...(Ts) if none match.
template <template <typename> class Predicate, typename... Ts>
constexpr int constexpr_first() {
return constexpr_impl::first(0, Predicate<Ts>::value...);
}
/// Return the index of the last type in Ts which satisfies Predicate<T>, or -1 if none match.
template <template <typename> class Predicate, typename... Ts>
constexpr int constexpr_last() {
return constexpr_impl::last(0, -1, Predicate<Ts>::value...);
}
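// For example, constexpr_first<std::is_integral, float, int, long>() == 1 and
// constexpr_last<std::is_integral, float, int, long>() == 2.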
/// Return the Nth element from the parameter pack
template <size_t N, typename T, typename... Ts>
struct pack_element {
using type = typename pack_element<N - 1, Ts...>::type;
};
template <typename T, typename... Ts>
struct pack_element<0, T, Ts...> {
using type = T;
};
/// Return the one and only type which matches the predicate, or Default if none match.
/// If more than one type matches the predicate, fail at compile-time.
template <template <typename> class Predicate, typename Default, typename... Ts>
struct exactly_one {
static constexpr auto found = constexpr_sum(Predicate<Ts>::value...);
static_assert(found <= 1, "Found more than one type matching the predicate");
static constexpr auto index = found ? constexpr_first<Predicate, Ts...>() : 0;
using type = conditional_t<found, typename pack_element<index, Ts...>::type, Default>;
};
template <template <typename> class P, typename Default>
struct exactly_one<P, Default> {
using type = Default;
};
template <template <typename> class Predicate, typename Default, typename... Ts>
using exactly_one_t = typename exactly_one<Predicate, Default, Ts...>::type;
/// Defer the evaluation of type T until types Us are instantiated
template <typename T, typename... /*Us*/>
struct deferred_type {
using type = T;
};
template <typename T, typename... Us>
using deferred_t = typename deferred_type<T, Us...>::type;
/// Like is_base_of, but requires a strict base (i.e. `is_strict_base_of<T, T>::value == false`,
/// unlike `std::is_base_of`)
template <typename Base, typename Derived>
using is_strict_base_of
= bool_constant<std::is_base_of<Base, Derived>::value && !std::is_same<Base, Derived>::value>;
/// Like is_base_of, but also requires that the base type is accessible (i.e. that a Derived
/// pointer can be converted to a Base pointer) For unions, `is_base_of<T, T>::value` is False, so
/// we need to check `is_same` as well.
template <typename Base, typename Derived>
using is_accessible_base_of
= bool_constant<(std::is_same<Base, Derived>::value || std::is_base_of<Base, Derived>::value)
&& std::is_convertible<Derived *, Base *>::value>;
template <template <typename...> class Base>
struct is_template_base_of_impl {
template <typename... Us>
static std::true_type check(Base<Us...> *);
static std::false_type check(...);
};
/// Check if a template is the base of a type. For example:
/// `is_template_base_of<Base, T>` is true if `struct T : Base<U> {}` where U can be anything
template <template <typename...> class Base, typename T>
// Sadly, all MSVC versions incl. 2022 need the workaround, even in C++20 mode.
// See also: https://github.com/pybind/pybind11/pull/3741
#if !defined(_MSC_VER)
using is_template_base_of
= decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T> *) nullptr));
#else
struct is_template_base_of
: decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T> *) nullptr)){};
#endif
/// Check if T is an instantiation of the template `Class`. For example:
/// `is_instantiation<shared_ptr, T>` is true if `T == shared_ptr<U>` where U can be anything.
template <template <typename...> class Class, typename T>
struct is_instantiation : std::false_type {};
template <template <typename...> class Class, typename... Us>
struct is_instantiation<Class, Class<Us...>> : std::true_type {};
/// Check if T is std::shared_ptr<U> where U can be anything
template <typename T>
using is_shared_ptr = is_instantiation<std::shared_ptr, T>;
/// Check if T looks like an input iterator
template <typename T, typename = void>
struct is_input_iterator : std::false_type {};
template <typename T>
struct is_input_iterator<T,
void_t<decltype(*std::declval<T &>()), decltype(++std::declval<T &>())>>
: std::true_type {};
template <typename T>
using is_function_pointer
= bool_constant<std::is_pointer<T>::value
&& std::is_function<typename std::remove_pointer<T>::type>::value>;
template <typename F>
struct strip_function_object {
// If you are encountering an
// 'error: name followed by "::" must be a class or namespace name'
// with the Intel compiler and a noexcept function here,
// try to use noexcept(true) instead of plain noexcept.
using type = typename remove_class<decltype(&F::operator())>::type;
};
// Extracts the function signature from a function, function pointer or lambda.
template <typename Function, typename F = remove_reference_t<Function>>
using function_signature_t = conditional_t<
std::is_function<F>::value,
F,
typename conditional_t<std::is_pointer<F>::value || std::is_member_pointer<F>::value,
std::remove_pointer<F>,
strip_function_object<F>>::type>;
/// Returns true if the type looks like a lambda: that is, isn't a function, pointer or member
/// pointer. Note that this can catch all sorts of other things, too; this is intended to be used
/// in a place where passing a lambda makes sense.
template <typename T>
using is_lambda = satisfies_none_of<remove_reference_t<T>,
std::is_function,
std::is_pointer,
std::is_member_pointer>;
// [workaround(intel)] Internal error on fold expression
/// Apply a function over each element of a parameter pack
#if defined(__cpp_fold_expressions) && !defined(__INTEL_COMPILER)
// Intel compiler produces an internal error on this fold expression (tested with ICC 19.0.2)
# define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (((PATTERN), void()), ...)
#else
using expand_side_effects = bool[];
# define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) \
(void) pybind11::detail::expand_side_effects { ((PATTERN), void(), false)..., false }
#endif
PYBIND11_NAMESPACE_END(detail)
/// C++ bindings of builtin Python exceptions
class PYBIND11_EXPORT_EXCEPTION builtin_exception : public std::runtime_error {
public:
using std::runtime_error::runtime_error;
/// Set the error using the Python C API
virtual void set_error() const = 0;
};
#define PYBIND11_RUNTIME_EXCEPTION(name, type) \
class PYBIND11_EXPORT_EXCEPTION name : public builtin_exception { \
public: \
using builtin_exception::builtin_exception; \
name() : name("") {} \
void set_error() const override { PyErr_SetString(type, what()); } \
};
PYBIND11_RUNTIME_EXCEPTION(stop_iteration, PyExc_StopIteration)
PYBIND11_RUNTIME_EXCEPTION(index_error, PyExc_IndexError)
PYBIND11_RUNTIME_EXCEPTION(key_error, PyExc_KeyError)
PYBIND11_RUNTIME_EXCEPTION(value_error, PyExc_ValueError)
PYBIND11_RUNTIME_EXCEPTION(type_error, PyExc_TypeError)
PYBIND11_RUNTIME_EXCEPTION(buffer_error, PyExc_BufferError)
PYBIND11_RUNTIME_EXCEPTION(import_error, PyExc_ImportError)
PYBIND11_RUNTIME_EXCEPTION(attribute_error, PyExc_AttributeError)
PYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybind11::cast or
/// handle::call fail due to a type
/// casting error
PYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally
[[noreturn]] PYBIND11_NOINLINE void pybind11_fail(const char *reason) {
assert(!PyErr_Occurred());
throw std::runtime_error(reason);
}
[[noreturn]] PYBIND11_NOINLINE void pybind11_fail(const std::string &reason) {
assert(!PyErr_Occurred());
throw std::runtime_error(reason);
}
template <typename T, typename SFINAE = void>
struct format_descriptor {};
template <typename T>
struct format_descriptor<
T,
detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value>> {
static constexpr const char c = 'O';
static constexpr const char value[2] = {c, '\0'};
static std::string format() { return std::string(1, c); }
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Returns the index of the given type in the type char array below, and in the list in numpy.h
// The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double;
// complex float,double,long double. Note that the long double types only participate when long
// double is actually longer than double (it isn't under MSVC).
// NB: not only the string below but also complex.h and numpy.h rely on this order.
template <typename T, typename SFINAE = void>
struct is_fmt_numeric {
static constexpr bool value = false;
};
template <typename T>
struct is_fmt_numeric<T, enable_if_t<std::is_arithmetic<T>::value>> {
static constexpr bool value = true;
static constexpr int index
= std::is_same<T, bool>::value
? 0
: 1
+ (std::is_integral<T>::value
? detail::log2(sizeof(T)) * 2 + std::is_unsigned<T>::value
: 8
+ (std::is_same<T, double>::value ? 1
: std::is_same<T, long double>::value ? 2
: 0));
};
PYBIND11_NAMESPACE_END(detail)
template <typename T>
struct format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>> {
static constexpr const char c = "?bBhHiIqQfdg"[detail::is_fmt_numeric<T>::index];
static constexpr const char value[2] = {c, '\0'};
static std::string format() { return std::string(1, c); }
};
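// For example, format_descriptor<bool>::value is "?", format_descriptor<std::int32_t>::value
// is "i", and format_descriptor<double>::value is "d".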
#if !defined(PYBIND11_CPP17)
template <typename T>
constexpr const char
format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>>::value[2];
#endif
/// RAII wrapper that temporarily clears any Python error state
struct error_scope {
PyObject *type, *value, *trace;
error_scope() { PyErr_Fetch(&type, &value, &trace); }
error_scope(const error_scope &) = delete;
error_scope &operator=(const error_scope &) = delete;
~error_scope() { PyErr_Restore(type, value, trace); }
};
/// Dummy destructor wrapper that can be used to expose classes with a private destructor
struct nodelete {
template <typename T>
void operator()(T *) {}
};
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename... Args>
struct overload_cast_impl {
template <typename Return>
constexpr auto operator()(Return (*pf)(Args...)) const noexcept -> decltype(pf) {
return pf;
}
template <typename Return, typename Class>
constexpr auto operator()(Return (Class::*pmf)(Args...),
std::false_type = {}) const noexcept -> decltype(pmf) {
return pmf;
}
template <typename Return, typename Class>
constexpr auto operator()(Return (Class::*pmf)(Args...) const,
std::true_type) const noexcept -> decltype(pmf) {
return pmf;
}
};
PYBIND11_NAMESPACE_END(detail)
// overload_cast requires variable templates: C++14
#if defined(PYBIND11_CPP14)
# define PYBIND11_OVERLOAD_CAST 1
/// Syntax sugar for resolving overloaded function pointers:
/// - regular: static_cast<Return (Class::*)(Arg0, Arg1, Arg2)>(&Class::func)
/// - sweet: overload_cast<Arg0, Arg1, Arg2>(&Class::func)
template <typename... Args>
static constexpr detail::overload_cast_impl<Args...> overload_cast{};
#endif
/// Const member function selector for overload_cast
/// - regular: static_cast<Return (Class::*)(Arg) const>(&Class::func)
/// - sweet: overload_cast<Arg>(&Class::func, const_)
static constexpr auto const_ = std::true_type{};
#if !defined(PYBIND11_CPP14) // no overload_cast: providing something that static_assert-fails:
template <typename... Args>
struct overload_cast {
static_assert(detail::deferred_t<std::false_type, Args...>::value,
"pybind11::overload_cast<...> requires compiling in C++14 mode");
};
#endif // overload_cast
PYBIND11_NAMESPACE_BEGIN(detail)
// Adaptor for converting arbitrary container arguments into a vector; implicitly convertible from
// any standard container (or C-style array) supporting std::begin/std::end, any singleton
// arithmetic type (if T is arithmetic), or explicitly constructible from an iterator pair.
template <typename T>
class any_container {
std::vector<T> v;
public:
any_container() = default;
// Can construct from a pair of iterators
template <typename It, typename = enable_if_t<is_input_iterator<It>::value>>
any_container(It first, It last) : v(first, last) {}
// Implicit conversion constructor from any arbitrary container type
// with values convertible to T
template <typename Container,
typename = enable_if_t<
std::is_convertible<decltype(*std::begin(std::declval<const Container &>())),
T>::value>>
// NOLINTNEXTLINE(google-explicit-constructor)
any_container(const Container &c) : any_container(std::begin(c), std::end(c)) {}
// initializer_list's aren't deducible, so don't get matched by the above template;
// we need this to explicitly allow implicit conversion from one:
template <typename TIn, typename = enable_if_t<std::is_convertible<TIn, T>::value>>
any_container(const std::initializer_list<TIn> &c) : any_container(c.begin(), c.end()) {}
// Avoid copying if given an rvalue vector of the correct type.
// NOLINTNEXTLINE(google-explicit-constructor)
any_container(std::vector<T> &&v) : v(std::move(v)) {}
// Moves the vector out of an rvalue any_container
// NOLINTNEXTLINE(google-explicit-constructor)
operator std::vector<T> &&() && { return std::move(v); }
// Dereferencing obtains a reference to the underlying vector
std::vector<T> &operator*() { return v; }
const std::vector<T> &operator*() const { return v; }
// -> lets you call methods on the underlying vector
std::vector<T> *operator->() { return &v; }
const std::vector<T> *operator->() const { return &v; }
};
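// A usage sketch (illustrative; `f` is a hypothetical function taking an
// `any_container<ssize_t> shape` parameter) -- all of the following convert implicitly:
//
//     f({2, 3, 4});                          // from an initializer_list
//     f(std::vector<std::size_t>{2, 3});     // from any std::begin/std::end container
//
// Inside `f`, `*shape` / `shape->size()` access the underlying std::vector<ssize_t>.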
// Forward-declaration; see detail/class.h
std::string get_fully_qualified_tp_name(PyTypeObject *);
template <typename T>
inline static std::shared_ptr<T>
try_get_shared_from_this(std::enable_shared_from_this<T> *holder_value_ptr) {
// Pre C++17, this code path exploits undefined behavior, but is known to work on many platforms.
// Use at your own risk!
// See also https://en.cppreference.com/w/cpp/memory/enable_shared_from_this, and in particular
// the `std::shared_ptr<Good> gp1 = not_so_good.getptr();` and `try`-`catch` parts of the example.
#if defined(__cpp_lib_enable_shared_from_this) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
return holder_value_ptr->weak_from_this().lock();
#else
try {
return holder_value_ptr->shared_from_this();
} catch (const std::bad_weak_ptr &) {
return nullptr;
}
#endif
}
// For silencing "unused" compiler warnings in special situations.
template <typename... Args>
#if defined(_MSC_VER) && _MSC_VER < 1920 // MSVC 2017
constexpr
#endif
inline void
silence_unused_warnings(Args &&...) {
}
// MSVC warning C4100: Unreferenced formal parameter
#if defined(_MSC_VER) && _MSC_VER <= 1916
# define PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(...) \
detail::silence_unused_warnings(__VA_ARGS__)
#else
# define PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(...)
#endif
// GCC -Wunused-but-set-parameter: all GCC versions (as of July 2021).
#if defined(__GNUG__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
# define PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(...) \
detail::silence_unused_warnings(__VA_ARGS__)
#else
# define PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(...)
#endif
#if defined(__clang__) \
&& (defined(__apple_build_version__) /* AppleClang 13.0.0.13000029 was the only data point \
available. */ \
|| (__clang_major__ >= 7 \
&& __clang_major__ <= 12) /* Clang 3, 5, 13, 14, 15 do not generate the warning. */ \
)
# define PYBIND11_DETECTED_CLANG_WITH_MISLEADING_CALL_STD_MOVE_EXPLICITLY_WARNING
// Example:
// tests/test_kwargs_and_defaults.cpp:46:68: error: local variable 'args' will be copied despite
// being returned by name [-Werror,-Wreturn-std-move]
// m.def("args_function", [](py::args args) -> py::tuple { return args; });
// ^~~~
// test_kwargs_and_defaults.cpp:46:68: note: call 'std::move' explicitly to avoid copying
// m.def("args_function", [](py::args args) -> py::tuple { return args; });
// ^~~~
// std::move(args)
#endif
// Pybind offers detailed error messages by default for all builds that are debug (through the
// negation of NDEBUG). This can also be manually enabled by users, for any builds, through
// defining PYBIND11_DETAILED_ERROR_MESSAGES. This information is primarily useful for those
// who are writing (as opposed to merely using) libraries that use pybind11.
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES) && !defined(NDEBUG)
# define PYBIND11_DETAILED_ERROR_MESSAGES
#endif
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
===================================================================================================================================
SOURCE CODE FILE: cpp_conduit.h
LINES: 1
SIZE: 2.60 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\cpp_conduit.h
ENCODING: utf-8
```h
// Copyright (c) 2024 The pybind Community.
#pragma once
#include <pybind11/pytypes.h>
#include "common.h"
#include "internals.h"
#include <typeinfo>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Forward declaration needed here: Refactoring opportunity.
extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *);
inline bool type_is_managed_by_our_internals(PyTypeObject *type_obj) {
#if defined(PYPY_VERSION)
auto &internals = get_internals();
return bool(internals.registered_types_py.find(type_obj)
!= internals.registered_types_py.end());
#else
return bool(type_obj->tp_new == pybind11_object_new);
#endif
}
inline bool is_instance_method_of_type(PyTypeObject *type_obj, PyObject *attr_name) {
PyObject *descr = _PyType_Lookup(type_obj, attr_name);
return bool((descr != nullptr) && PyInstanceMethod_Check(descr));
}
inline object try_get_cpp_conduit_method(PyObject *obj) {
if (PyType_Check(obj)) {
return object();
}
PyTypeObject *type_obj = Py_TYPE(obj);
str attr_name("_pybind11_conduit_v1_");
bool assumed_to_be_callable = false;
if (type_is_managed_by_our_internals(type_obj)) {
if (!is_instance_method_of_type(type_obj, attr_name.ptr())) {
return object();
}
assumed_to_be_callable = true;
}
PyObject *method = PyObject_GetAttr(obj, attr_name.ptr());
if (method == nullptr) {
PyErr_Clear();
return object();
}
if (!assumed_to_be_callable && PyCallable_Check(method) == 0) {
Py_DECREF(method);
return object();
}
return reinterpret_steal<object>(method);
}
inline void *try_raw_pointer_ephemeral_from_cpp_conduit(handle src,
const std::type_info *cpp_type_info) {
object method = try_get_cpp_conduit_method(src.ptr());
if (method) {
capsule cpp_type_info_capsule(const_cast<void *>(static_cast<const void *>(cpp_type_info)),
typeid(std::type_info).name());
object cpp_conduit = method(bytes(PYBIND11_PLATFORM_ABI_ID),
cpp_type_info_capsule,
bytes("raw_pointer_ephemeral"));
if (isinstance<capsule>(cpp_conduit)) {
return reinterpret_borrow<capsule>(cpp_conduit).get_pointer();
}
}
return nullptr;
}
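// Note on the protocol shape exercised above: `_pybind11_conduit_v1_` is called with three
// arguments -- the platform ABI identifier as bytes, a capsule wrapping the
// `const std::type_info *` (tagged with `typeid(std::type_info).name()`), and the bytes
// request `"raw_pointer_ephemeral"`. Any return value that is not a capsule is treated as
// a mismatch, and nullptr is returned here.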
#define PYBIND11_HAS_CPP_CONDUIT 1
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
=============================================================================================================================
SOURCE CODE FILE: descr.h
LINES: 1
SIZE: 6.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\descr.h
ENCODING: utf-8
```h
/*
pybind11/detail/descr.h: Helper type for concatenating type signatures at compile time
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "common.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
#if !defined(_MSC_VER)
# define PYBIND11_DESCR_CONSTEXPR static constexpr
#else
# define PYBIND11_DESCR_CONSTEXPR const
#endif
/* Concatenate type signatures at compile time */
template <size_t N, typename... Ts>
struct descr {
char text[N + 1]{'\0'};
constexpr descr() = default;
// NOLINTNEXTLINE(google-explicit-constructor)
constexpr descr(char const (&s)[N + 1]) : descr(s, make_index_sequence<N>()) {}
template <size_t... Is>
constexpr descr(char const (&s)[N + 1], index_sequence<Is...>) : text{s[Is]..., '\0'} {}
template <typename... Chars>
// NOLINTNEXTLINE(google-explicit-constructor)
constexpr descr(char c, Chars... cs) : text{c, static_cast<char>(cs)..., '\0'} {}
static constexpr std::array<const std::type_info *, sizeof...(Ts) + 1> types() {
return {{&typeid(Ts)..., nullptr}};
}
};
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2, size_t... Is1, size_t... Is2>
constexpr descr<N1 + N2, Ts1..., Ts2...> plus_impl(const descr<N1, Ts1...> &a,
const descr<N2, Ts2...> &b,
index_sequence<Is1...>,
index_sequence<Is2...>) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(b);
return {a.text[Is1]..., b.text[Is2]...};
}
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
constexpr descr<N1 + N2, Ts1..., Ts2...> operator+(const descr<N1, Ts1...> &a,
const descr<N2, Ts2...> &b) {
return plus_impl(a, b, make_index_sequence<N1>(), make_index_sequence<N2>());
}
template <size_t N>
constexpr descr<N - 1> const_name(char const (&text)[N]) {
return descr<N - 1>(text);
}
constexpr descr<0> const_name(char const (&)[1]) { return {}; }
template <size_t Rem, size_t... Digits>
struct int_to_str : int_to_str<Rem / 10, Rem % 10, Digits...> {};
template <size_t... Digits>
struct int_to_str<0, Digits...> {
// WARNING: This only works with C++17 or higher.
static constexpr auto digits = descr<sizeof...(Digits)>(('0' + Digits)...);
};
// Ternary description (like std::conditional)
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<B, descr<N1 - 1>> const_name(char const (&text1)[N1], char const (&)[N2]) {
return const_name(text1);
}
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<!B, descr<N2 - 1>> const_name(char const (&)[N1], char const (&text2)[N2]) {
return const_name(text2);
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<B, T1> const_name(const T1 &d, const T2 &) {
return d;
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<!B, T2> const_name(const T1 &, const T2 &d) {
return d;
}
template <size_t Size>
auto constexpr const_name() -> remove_cv_t<decltype(int_to_str<Size / 10, Size % 10>::digits)> {
return int_to_str<Size / 10, Size % 10>::digits;
}
template <typename Type>
constexpr descr<1, Type> const_name() {
return {'%'};
}
// If "_" is defined as a macro, py::detail::_ cannot be provided.
// It is therefore best to use py::detail::const_name universally.
// This block is for backward compatibility only.
// (The const_name code is repeated to avoid introducing a "_" #define ourselves.)
#ifndef _
# define PYBIND11_DETAIL_UNDERSCORE_BACKWARD_COMPATIBILITY
template <size_t N>
constexpr descr<N - 1> _(char const (&text)[N]) {
return const_name<N>(text);
}
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<B, descr<N1 - 1>> _(char const (&text1)[N1], char const (&text2)[N2]) {
return const_name<B, N1, N2>(text1, text2);
}
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<!B, descr<N2 - 1>> _(char const (&text1)[N1], char const (&text2)[N2]) {
return const_name<B, N1, N2>(text1, text2);
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<B, T1> _(const T1 &d1, const T2 &d2) {
return const_name<B, T1, T2>(d1, d2);
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<!B, T2> _(const T1 &d1, const T2 &d2) {
return const_name<B, T1, T2>(d1, d2);
}
template <size_t Size>
auto constexpr _() -> remove_cv_t<decltype(int_to_str<Size / 10, Size % 10>::digits)> {
return const_name<Size>();
}
template <typename Type>
constexpr descr<1, Type> _() {
return const_name<Type>();
}
#endif // #ifndef _
constexpr descr<0> concat() { return {}; }
template <size_t N, typename... Ts>
constexpr descr<N, Ts...> concat(const descr<N, Ts...> &descr) {
return descr;
}
#ifdef __cpp_fold_expressions
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
constexpr descr<N1 + N2 + 2, Ts1..., Ts2...> operator,(const descr<N1, Ts1...> &a,
const descr<N2, Ts2...> &b) {
return a + const_name(", ") + b;
}
template <size_t N, typename... Ts, typename... Args>
constexpr auto concat(const descr<N, Ts...> &d, const Args &...args) {
return (d, ..., args);
}
#else
template <size_t N, typename... Ts, typename... Args>
constexpr auto concat(const descr<N, Ts...> &d,
const Args &...args) -> decltype(std::declval<descr<N + 2, Ts...>>()
+ concat(args...)) {
return d + const_name(", ") + concat(args...);
}
#endif
template <size_t N, typename... Ts>
constexpr descr<N + 2, Ts...> type_descr(const descr<N, Ts...> &descr) {
return const_name("{") + descr + const_name("}");
}
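// A usage sketch (illustrative): signatures are assembled at compile time, e.g.
//
//     constexpr auto sig
//         = const_name("foo(") + concat(const_name("int"), const_name("float"))
//           + const_name(")");
//
// produces a descr whose `text` is "foo(int, float)".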
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
=============================================================================================================================================
SOURCE CODE FILE: exception_translation.h
LINES: 1
SIZE: 2.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\exception_translation.h
ENCODING: utf-8
```h
/*
pybind11/detail/exception_translation.h: means to translate C++ exceptions to Python exceptions
Copyright (c) 2024 The Pybind Development Team.
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "common.h"
#include "internals.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Apply all the exception translators from a list.
// Returns true if one of the translators completed without raising an exception
// itself. A return of false indicates that if there are other translators
// available, they should be tried.
inline bool apply_exception_translators(std::forward_list<ExceptionTranslator> &translators) {
auto last_exception = std::current_exception();
for (auto &translator : translators) {
try {
translator(last_exception);
return true;
} catch (...) {
last_exception = std::current_exception();
}
}
return false;
}
inline void try_translate_exceptions() {
/* When an exception is caught, give each registered exception
translator a chance to translate it to a Python exception. First
all module-local translators will be tried in reverse order of
registration. If none of the module-local translators handle
the exception (or there are no module-local translators) then
the global translators will be tried, also in reverse order of
registration.
A translator may choose to do one of the following:
- catch the exception and call py::set_error()
to set a standard (or custom) Python exception, or
- do nothing and let the exception fall through to the next translator, or
- delegate translation to the next translator by throwing a new type of exception.
*/
bool handled = with_internals([&](internals &internals) {
auto &local_exception_translators = get_local_internals().registered_exception_translators;
if (detail::apply_exception_translators(local_exception_translators)) {
return true;
}
auto &exception_translators = internals.registered_exception_translators;
if (detail::apply_exception_translators(exception_translators)) {
return true;
}
return false;
});
if (!handled) {
set_error(PyExc_SystemError, "Exception escaped from default exception translator!");
}
}
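// A registration sketch (illustrative; `MyError` is a hypothetical C++ exception type):
//
//     py::register_exception_translator([](std::exception_ptr p) {
//         try {
//             if (p) std::rethrow_exception(p);
//         } catch (const MyError &e) {
//             py::set_error(PyExc_RuntimeError, e.what());
//         }
//     });
//
// A translator that does not handle the active exception simply lets it propagate, and the
// loop above moves on to the next registered translator.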
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
============================================================================================================================
SOURCE CODE FILE: init.h
LINES: 1
SIZE: 17.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\init.h
ENCODING: utf-8
```h
/*
pybind11/detail/init.h: init factory function implementation and support code.
Copyright (c) 2017 Jason Rhinelander <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "class.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
PYBIND11_NAMESPACE_BEGIN(detail)
template <>
class type_caster<value_and_holder> {
public:
bool load(handle h, bool) {
value = reinterpret_cast<value_and_holder *>(h.ptr());
return true;
}
template <typename>
using cast_op_type = value_and_holder &;
explicit operator value_and_holder &() { return *value; }
static constexpr auto name = const_name<value_and_holder>();
private:
value_and_holder *value = nullptr;
};
PYBIND11_NAMESPACE_BEGIN(initimpl)
inline void no_nullptr(void *ptr) {
if (!ptr) {
throw type_error("pybind11::init(): factory function returned nullptr");
}
}
// Implementing functions for all forms of py::init<...> and py::init(...)
template <typename Class>
using Cpp = typename Class::type;
template <typename Class>
using Alias = typename Class::type_alias;
template <typename Class>
using Holder = typename Class::holder_type;
template <typename Class>
using is_alias_constructible = std::is_constructible<Alias<Class>, Cpp<Class> &&>;
// Takes a Cpp pointer and returns true if it actually is a polymorphic Alias instance.
template <typename Class, enable_if_t<Class::has_alias, int> = 0>
bool is_alias(Cpp<Class> *ptr) {
return dynamic_cast<Alias<Class> *>(ptr) != nullptr;
}
// Failing fallback version of the above for a no-alias class (always returns false)
template <typename /*Class*/>
constexpr bool is_alias(void *) {
return false;
}
// Constructs and returns a new object; if the given arguments don't map to a constructor, we fall
// back to brace aggregate initialization so that aggregate initialization can be used with
// py::init, e.g. `py::init<int, int>` to initialize a `struct T { int a; int b; }`. For
// non-aggregate types, we need to use an ordinary T(...) constructor (invoking as `T{...}` usually
// works, but will not do the expected thing when `T` has an `initializer_list<T>` constructor).
template <typename Class,
typename... Args,
detail::enable_if_t<std::is_constructible<Class, Args...>::value, int> = 0>
inline Class *construct_or_initialize(Args &&...args) {
return new Class(std::forward<Args>(args)...);
}
template <typename Class,
typename... Args,
detail::enable_if_t<!std::is_constructible<Class, Args...>::value, int> = 0>
inline Class *construct_or_initialize(Args &&...args) {
return new Class{std::forward<Args>(args)...};
}
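// For example (illustrative): given `struct T { int a; int b; };` with no user-declared
// constructor, std::is_constructible<T, int, int> is false (pre-C++20), so
// construct_or_initialize<T>(1, 2) selects the second overload and performs `new T{1, 2}`.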
// Attempts to construct an alias using an `Alias(Cpp &&)` constructor. This allows types with
// an alias to provide only a single Cpp factory function as long as the Alias can be
// constructed from an rvalue reference of the base Cpp type. This means that Alias classes
// can, when appropriate, simply define a `Alias(Cpp &&)` constructor rather than needing to
// inherit all the base class constructors.
template <typename Class>
void construct_alias_from_cpp(std::true_type /*is_alias_constructible*/,
value_and_holder &v_h,
Cpp<Class> &&base) {
v_h.value_ptr() = new Alias<Class>(std::move(base));
}
template <typename Class>
[[noreturn]] void construct_alias_from_cpp(std::false_type /*!is_alias_constructible*/,
value_and_holder &,
Cpp<Class> &&) {
throw type_error("pybind11::init(): unable to convert returned instance to required "
"alias class: no `Alias<Class>(Class &&)` constructor available");
}
// Error-generating fallback for factories that don't match one of the below construction
// mechanisms.
template <typename Class>
void construct(...) {
static_assert(!std::is_same<Class, Class>::value /* always false */,
"pybind11::init(): init function must return a compatible pointer, "
"holder, or value");
}
// Pointer return v1: the factory function returns a class pointer for a registered class.
// If we don't need an alias (because this class doesn't have one, or because the final type is
// inherited on the Python side) we can simply take over ownership. Otherwise we need to try to
// construct an Alias from the returned base instance.
template <typename Class>
void construct(value_and_holder &v_h, Cpp<Class> *ptr, bool need_alias) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(need_alias);
no_nullptr(ptr);
if (Class::has_alias && need_alias && !is_alias<Class>(ptr)) {
// We're going to try to construct an alias by moving the cpp type. Whether or not
// that succeeds, we still need to destroy the original cpp pointer (either the
// moved away leftover, if the alias construction works, or the value itself if we
// throw an error), but we can't just call `delete ptr`: it might have a special
// deleter, or might be shared_from_this. So we construct a holder around it as if
// it was a normal instance, then steal the holder away into a local variable; thus
// the holder and destruction happens when we leave the C++ scope, and the holder
// class gets to handle the destruction however it likes.
v_h.value_ptr() = ptr;
v_h.set_instance_registered(true); // Trick to prevent init_instance from registering it
// DANGER ZONE BEGIN: exceptions will leave v_h in an invalid state.
v_h.type->init_instance(v_h.inst, nullptr); // Set up the holder
Holder<Class> temp_holder(std::move(v_h.holder<Holder<Class>>())); // Steal the holder
v_h.type->dealloc(v_h); // Destroys the moved-out holder remains, resets value ptr to null
v_h.set_instance_registered(false);
// DANGER ZONE END.
construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(*ptr));
} else {
// Otherwise the type isn't inherited, so we don't need an Alias
v_h.value_ptr() = ptr;
}
}
// Pointer return v2: a factory that always returns an alias instance ptr. We simply take over
// ownership of the pointer.
template <typename Class, enable_if_t<Class::has_alias, int> = 0>
void construct(value_and_holder &v_h, Alias<Class> *alias_ptr, bool) {
no_nullptr(alias_ptr);
v_h.value_ptr() = static_cast<Cpp<Class> *>(alias_ptr);
}
// Holder return: copy its pointer, and move or copy the returned holder into the new instance's
// holder. This also handles types like std::shared_ptr<T> and std::unique_ptr<T> where T is a
// derived type (through those holder's implicit conversion from derived class holder
// constructors).
template <typename Class>
void construct(value_and_holder &v_h, Holder<Class> holder, bool need_alias) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(need_alias);
auto *ptr = holder_helper<Holder<Class>>::get(holder);
no_nullptr(ptr);
// If we need an alias, check that the held pointer is actually an alias instance
if (Class::has_alias && need_alias && !is_alias<Class>(ptr)) {
throw type_error("pybind11::init(): construction failed: returned holder-wrapped instance "
"is not an alias instance");
}
v_h.value_ptr() = ptr;
v_h.type->init_instance(v_h.inst, &holder);
}
// return-by-value version 1: returning a cpp class by value. If the class has an alias and an
// alias is required, the alias must have an `Alias(Cpp &&)` constructor so that we can construct
// the alias from the base when needed (i.e. because of Python-side inheritance). When we don't
// need it, we simply move-construct the cpp value into a new instance.
template <typename Class>
void construct(value_and_holder &v_h, Cpp<Class> &&result, bool need_alias) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(need_alias);
static_assert(is_move_constructible<Cpp<Class>>::value,
"pybind11::init() return-by-value factory function requires a movable class");
if (Class::has_alias && need_alias) {
construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(result));
} else {
v_h.value_ptr() = new Cpp<Class>(std::move(result));
}
}
// return-by-value version 2: returning a value of the alias type itself. We move-construct an
// Alias instance (even if no Python-side inheritance is involved). This is intended for
// cases where Alias initialization is always desired.
template <typename Class>
void construct(value_and_holder &v_h, Alias<Class> &&result, bool) {
static_assert(
is_move_constructible<Alias<Class>>::value,
"pybind11::init() return-by-alias-value factory function requires a movable alias class");
v_h.value_ptr() = new Alias<Class>(std::move(result));
}
// Implementing class for py::init<...>()
template <typename... Args>
struct constructor {
template <typename Class, typename... Extra, enable_if_t<!Class::has_alias, int> = 0>
static void execute(Class &cl, const Extra &...extra) {
cl.def(
"__init__",
[](value_and_holder &v_h, Args... args) {
v_h.value_ptr() = construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);
},
is_new_style_constructor(),
extra...);
}
template <
typename Class,
typename... Extra,
enable_if_t<Class::has_alias && std::is_constructible<Cpp<Class>, Args...>::value, int>
= 0>
static void execute(Class &cl, const Extra &...extra) {
cl.def(
"__init__",
[](value_and_holder &v_h, Args... args) {
if (Py_TYPE(v_h.inst) == v_h.type->type) {
v_h.value_ptr()
= construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);
} else {
v_h.value_ptr()
= construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
}
},
is_new_style_constructor(),
extra...);
}
template <
typename Class,
typename... Extra,
enable_if_t<Class::has_alias && !std::is_constructible<Cpp<Class>, Args...>::value, int>
= 0>
static void execute(Class &cl, const Extra &...extra) {
cl.def(
"__init__",
[](value_and_holder &v_h, Args... args) {
v_h.value_ptr()
= construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
},
is_new_style_constructor(),
extra...);
}
};
// Implementing class for py::init_alias<...>()
template <typename... Args>
struct alias_constructor {
template <
typename Class,
typename... Extra,
enable_if_t<Class::has_alias && std::is_constructible<Alias<Class>, Args...>::value, int>
= 0>
static void execute(Class &cl, const Extra &...extra) {
cl.def(
"__init__",
[](value_and_holder &v_h, Args... args) {
v_h.value_ptr()
= construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
},
is_new_style_constructor(),
extra...);
}
};
// Implementation class for py::init(Func) and py::init(Func, AliasFunc)
template <typename CFunc,
typename AFunc = void_type (*)(),
typename = function_signature_t<CFunc>,
typename = function_signature_t<AFunc>>
struct factory;
// Specialization for py::init(Func)
template <typename Func, typename Return, typename... Args>
struct factory<Func, void_type (*)(), Return(Args...)> {
remove_reference_t<Func> class_factory;
// NOLINTNEXTLINE(google-explicit-constructor)
factory(Func &&f) : class_factory(std::forward<Func>(f)) {}
// The given class either has no alias or has no separate alias factory;
// this always constructs the class itself. If the class is registered with an alias
// type and an alias instance is needed (i.e. because the final type is a Python class
// inheriting from the C++ type) the returned value needs to either already be an alias
// instance, or the alias needs to be constructible from a `Class &&` argument.
template <typename Class, typename... Extra>
void execute(Class &cl, const Extra &...extra) && {
#if defined(PYBIND11_CPP14)
cl.def(
"__init__",
[func = std::move(class_factory)]
#else
auto &func = class_factory;
cl.def(
"__init__",
[func]
#endif
(value_and_holder &v_h, Args... args) {
construct<Class>(
v_h, func(std::forward<Args>(args)...), Py_TYPE(v_h.inst) != v_h.type->type);
},
is_new_style_constructor(),
extra...);
}
};
// Specialization for py::init(Func, AliasFunc)
template <typename CFunc,
typename AFunc,
typename CReturn,
typename... CArgs,
typename AReturn,
typename... AArgs>
struct factory<CFunc, AFunc, CReturn(CArgs...), AReturn(AArgs...)> {
static_assert(sizeof...(CArgs) == sizeof...(AArgs),
"pybind11::init(class_factory, alias_factory): class and alias factories "
"must have identical argument signatures");
static_assert(all_of<std::is_same<CArgs, AArgs>...>::value,
"pybind11::init(class_factory, alias_factory): class and alias factories "
"must have identical argument signatures");
remove_reference_t<CFunc> class_factory;
remove_reference_t<AFunc> alias_factory;
factory(CFunc &&c, AFunc &&a)
: class_factory(std::forward<CFunc>(c)), alias_factory(std::forward<AFunc>(a)) {}
// The class factory is called when the `self` type passed to `__init__` is the direct
// class (i.e. not inherited), the alias factory when `self` is a Python-side subtype.
template <typename Class, typename... Extra>
void execute(Class &cl, const Extra &...extra) && {
static_assert(Class::has_alias,
"The two-argument version of `py::init()` can "
"only be used if the class has an alias");
#if defined(PYBIND11_CPP14)
cl.def(
"__init__",
[class_func = std::move(class_factory), alias_func = std::move(alias_factory)]
#else
auto &class_func = class_factory;
auto &alias_func = alias_factory;
cl.def(
"__init__",
[class_func, alias_func]
#endif
(value_and_holder &v_h, CArgs... args) {
if (Py_TYPE(v_h.inst) == v_h.type->type) {
// If the instance type equals the registered type we don't have inheritance,
// so don't need the alias and can construct using the class function:
construct<Class>(v_h, class_func(std::forward<CArgs>(args)...), false);
} else {
construct<Class>(v_h, alias_func(std::forward<CArgs>(args)...), true);
}
},
is_new_style_constructor(),
extra...);
}
};
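// Usage sketch (hypothetical `Pet`/`PyPet` types): the two-factory form lets instances of the
// exact registered type avoid constructing the (potentially heavier) alias:
//
//     py::class_<Pet, PyPet>(m, "Pet")
//         .def(py::init(
//             [](std::string name) { return Pet(std::move(name)); },     // direct instantiation
//             [](std::string name) { return PyPet(std::move(name)); })); // Python-side subclass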
/// Set just the C++ state. Same as `__init__`.
template <typename Class, typename T>
void setstate(value_and_holder &v_h, T &&result, bool need_alias) {
construct<Class>(v_h, std::forward<T>(result), need_alias);
}
/// Set both the C++ and Python states
template <typename Class,
typename T,
typename O,
enable_if_t<std::is_convertible<O, handle>::value, int> = 0>
void setstate(value_and_holder &v_h, std::pair<T, O> &&result, bool need_alias) {
construct<Class>(v_h, std::move(result.first), need_alias);
auto d = handle(result.second);
if (PyDict_Check(d.ptr()) && PyDict_Size(d.ptr()) == 0) {
// Skipping setattr below, to not force use of py::dynamic_attr() for Class unnecessarily.
// See PR #2972 for details.
return;
}
setattr((PyObject *) v_h.inst, "__dict__", d);
}
/// Implementation for py::pickle(GetState, SetState)
template <typename Get,
typename Set,
typename = function_signature_t<Get>,
typename = function_signature_t<Set>>
struct pickle_factory;
template <typename Get,
typename Set,
typename RetState,
typename Self,
typename NewInstance,
typename ArgState>
struct pickle_factory<Get, Set, RetState(Self), NewInstance(ArgState)> {
static_assert(std::is_same<intrinsic_t<RetState>, intrinsic_t<ArgState>>::value,
"The type returned by `__getstate__` must be the same "
"as the argument accepted by `__setstate__`");
remove_reference_t<Get> get;
remove_reference_t<Set> set;
pickle_factory(Get get, Set set) : get(std::forward<Get>(get)), set(std::forward<Set>(set)) {}
template <typename Class, typename... Extra>
void execute(Class &cl, const Extra &...extra) && {
cl.def("__getstate__", std::move(get));
#if defined(PYBIND11_CPP14)
cl.def(
"__setstate__",
[func = std::move(set)]
#else
auto &func = set;
cl.def(
"__setstate__",
[func]
#endif
(value_and_holder &v_h, ArgState state) {
setstate<Class>(
v_h, func(std::forward<ArgState>(state)), Py_TYPE(v_h.inst) != v_h.type->type);
},
is_new_style_constructor(),
extra...);
}
};
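// Usage sketch (hypothetical `Point` type): py::pickle() pairs a __getstate__ returning a
// picklable object with a __setstate__ rebuilding the C++ value from it:
//
//     py::class_<Point>(m, "Point")
//         .def(py::pickle(
//             [](const Point &p) { return py::make_tuple(p.x, p.y); },  // __getstate__
//             [](py::tuple t) {                                         // __setstate__
//                 return Point(t[0].cast<double>(), t[1].cast<double>());
//             }));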
PYBIND11_NAMESPACE_END(initimpl)
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
=================================================================================================================================
SOURCE CODE FILE: internals.h
LINES: 1
SIZE: 31.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\internals.h
ENCODING: utf-8
```h
/*
pybind11/detail/internals.h: Internal data structure and related functions
Copyright (c) 2017 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "common.h"
#if defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
# include <pybind11/gil.h>
#endif
#include <pybind11/pytypes.h>
#include <exception>
#include <mutex>
#include <thread>
/// Tracks the `internals` and `type_info` ABI version independent of the main library version.
///
/// Some portions of the code use an ABI that is conditional on this
/// version number. That allows ABI-breaking changes to be "pre-implemented".
/// Once the default version number is incremented, the conditional logic that
/// no longer applies can be removed. Additionally, users that need not
/// maintain ABI compatibility can increase the version number in order to take
/// advantage of any functionality/efficiency improvements that depend on the
/// newer ABI.
///
/// WARNING: If you choose to manually increase the ABI version, note that
/// pybind11 may not be tested as thoroughly with a non-default ABI version, and
/// further ABI-incompatible changes may be made before the ABI is officially
/// changed to the new version.
#ifndef PYBIND11_INTERNALS_VERSION
# if PY_VERSION_HEX >= 0x030C0000 || defined(_MSC_VER)
// Version bump for Python 3.12+, before first 3.12 beta release.
// Version bump for MSVC piggy-backed on PR #4779. See comments there.
# define PYBIND11_INTERNALS_VERSION 5
# else
# define PYBIND11_INTERNALS_VERSION 4
# endif
#endif
// This requirement is mainly to reduce the support burden (see PR #4570).
static_assert(PY_VERSION_HEX < 0x030C0000 || PYBIND11_INTERNALS_VERSION >= 5,
"pybind11 ABI version 5 is the minimum for Python 3.12+");
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
using ExceptionTranslator = void (*)(std::exception_ptr);
PYBIND11_NAMESPACE_BEGIN(detail)
constexpr const char *internals_function_record_capsule_name = "pybind11_function_record_capsule";
// Forward declarations
inline PyTypeObject *make_static_property_type();
inline PyTypeObject *make_default_metaclass();
inline PyObject *make_object_base_type(PyTypeObject *metaclass);
// The old Python Thread Local Storage (TLS) API is deprecated in Python 3.7 in favor of the new
// Thread Specific Storage (TSS) API.
// Avoid unnecessary allocation of `Py_tss_t`, since we cannot use
// `Py_LIMITED_API` anyway.
#if PYBIND11_INTERNALS_VERSION > 4
# define PYBIND11_TLS_KEY_REF Py_tss_t &
# if defined(__clang__)
# define PYBIND11_TLS_KEY_INIT(var) \
_Pragma("clang diagnostic push") /**/ \
_Pragma("clang diagnostic ignored \"-Wmissing-field-initializers\"") /**/ \
Py_tss_t var \
= Py_tss_NEEDS_INIT; \
_Pragma("clang diagnostic pop")
# elif defined(__GNUC__) && !defined(__INTEL_COMPILER)
# define PYBIND11_TLS_KEY_INIT(var) \
_Pragma("GCC diagnostic push") /**/ \
_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"") /**/ \
Py_tss_t var \
= Py_tss_NEEDS_INIT; \
_Pragma("GCC diagnostic pop")
# else
# define PYBIND11_TLS_KEY_INIT(var) Py_tss_t var = Py_tss_NEEDS_INIT;
# endif
# define PYBIND11_TLS_KEY_CREATE(var) (PyThread_tss_create(&(var)) == 0)
# define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get(&(key))
# define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set(&(key), (value))
# define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set(&(key), nullptr)
# define PYBIND11_TLS_FREE(key) PyThread_tss_delete(&(key))
#else
# define PYBIND11_TLS_KEY_REF Py_tss_t *
# define PYBIND11_TLS_KEY_INIT(var) Py_tss_t *var = nullptr;
# define PYBIND11_TLS_KEY_CREATE(var) \
(((var) = PyThread_tss_alloc()) != nullptr && (PyThread_tss_create((var)) == 0))
# define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get((key))
# define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set((key), (value))
# define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set((key), nullptr)
# define PYBIND11_TLS_FREE(key) PyThread_tss_free(key)
#endif
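// Typical call pattern for these macros (sketch; `key` stands for a member declared with
// PYBIND11_TLS_KEY_INIT, as in the `internals` struct below):
//
//     if (!PYBIND11_TLS_KEY_CREATE(key)) { /* report failure */ }
//     PYBIND11_TLS_REPLACE_VALUE(key, some_ptr);
//     void *p = PYBIND11_TLS_GET_VALUE(key);
//     PYBIND11_TLS_FREE(key);  // during teardown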
// Python loads modules by default with dlopen with the RTLD_LOCAL flag; under libc++ and possibly
// other STLs, this means `typeid(A)` from one module won't equal `typeid(A)` from another module
// even when `A` is the same, non-hidden-visibility type (e.g. from a common include). Under
// libstdc++, this doesn't happen: equality and the type_index hash are based on the type name,
// which works. If not under a known-good stl, provide our own name-based hash and equality
// functions that use the type name.
#if (PYBIND11_INTERNALS_VERSION <= 4 && defined(__GLIBCXX__)) \
|| (PYBIND11_INTERNALS_VERSION >= 5 && !defined(_LIBCPP_VERSION))
inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) { return lhs == rhs; }
using type_hash = std::hash<std::type_index>;
using type_equal_to = std::equal_to<std::type_index>;
#else
inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) {
return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;
}
struct type_hash {
size_t operator()(const std::type_index &t) const {
size_t hash = 5381;
const char *ptr = t.name();
while (auto c = static_cast<unsigned char>(*ptr++)) {
hash = (hash * 33) ^ c;
}
return hash;
}
};
struct type_equal_to {
bool operator()(const std::type_index &lhs, const std::type_index &rhs) const {
return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;
}
};
#endif
template <typename value_type>
using type_map = std::unordered_map<std::type_index, value_type, type_hash, type_equal_to>;
struct override_hash {
inline size_t operator()(const std::pair<const PyObject *, const char *> &v) const {
size_t value = std::hash<const void *>()(v.first);
value ^= std::hash<const void *>()(v.second) + 0x9e3779b9 + (value << 6) + (value >> 2);
return value;
}
};
using instance_map = std::unordered_multimap<const void *, instance *>;
#ifdef Py_GIL_DISABLED
// Wrapper around PyMutex to provide BasicLockable semantics
class pymutex {
PyMutex mutex;
public:
pymutex() : mutex({}) {}
void lock() { PyMutex_Lock(&mutex); }
void unlock() { PyMutex_Unlock(&mutex); }
};
// Instance map shards are used to reduce mutex contention in free-threaded Python.
struct instance_map_shard {
instance_map registered_instances;
pymutex mutex;
    // alignas(64) would be better, but causes compile errors on macOS before 10.14 (see #5200)
char padding[64 - (sizeof(instance_map) + sizeof(pymutex)) % 64];
};
static_assert(sizeof(instance_map_shard) % 64 == 0,
"instance_map_shard size is not a multiple of 64 bytes");
#endif
/// Internal data structure used to track registered instances and types.
/// Whenever binary incompatible changes are made to this structure,
/// `PYBIND11_INTERNALS_VERSION` must be incremented.
struct internals {
#ifdef Py_GIL_DISABLED
pymutex mutex;
#endif
// std::type_index -> pybind11's type information
type_map<type_info *> registered_types_cpp;
// PyTypeObject* -> base type_info(s)
std::unordered_map<PyTypeObject *, std::vector<type_info *>> registered_types_py;
#ifdef Py_GIL_DISABLED
std::unique_ptr<instance_map_shard[]> instance_shards; // void * -> instance*
size_t instance_shards_mask;
#else
instance_map registered_instances; // void * -> instance*
#endif
std::unordered_set<std::pair<const PyObject *, const char *>, override_hash>
inactive_override_cache;
type_map<std::vector<bool (*)(PyObject *, void *&)>> direct_conversions;
std::unordered_map<const PyObject *, std::vector<PyObject *>> patients;
std::forward_list<ExceptionTranslator> registered_exception_translators;
std::unordered_map<std::string, void *> shared_data; // Custom data to be shared across
// extensions
#if PYBIND11_INTERNALS_VERSION == 4
std::vector<PyObject *> unused_loader_patient_stack_remove_at_v5;
#endif
std::forward_list<std::string> static_strings; // Stores the std::strings backing
// detail::c_str()
PyTypeObject *static_property_type;
PyTypeObject *default_metaclass;
PyObject *instance_base;
// Unused if PYBIND11_SIMPLE_GIL_MANAGEMENT is defined:
PYBIND11_TLS_KEY_INIT(tstate)
#if PYBIND11_INTERNALS_VERSION > 4
PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)
#endif // PYBIND11_INTERNALS_VERSION > 4
// Unused if PYBIND11_SIMPLE_GIL_MANAGEMENT is defined:
PyInterpreterState *istate = nullptr;
#if PYBIND11_INTERNALS_VERSION > 4
// Note that we have to use a std::string to allocate memory to ensure a unique address
// We want unique addresses since we use pointer equality to compare function records
std::string function_record_capsule_name = internals_function_record_capsule_name;
#endif
internals() = default;
internals(const internals &other) = delete;
internals &operator=(const internals &other) = delete;
~internals() {
#if PYBIND11_INTERNALS_VERSION > 4
PYBIND11_TLS_FREE(loader_life_support_tls_key);
#endif // PYBIND11_INTERNALS_VERSION > 4
// This destructor is called *after* Py_Finalize() in finalize_interpreter().
// That *SHOULD BE* fine. The following details what happens when PyThread_tss_free is
// called. PYBIND11_TLS_FREE is PyThread_tss_free on python 3.7+. On older python, it does
// nothing. PyThread_tss_free calls PyThread_tss_delete and PyMem_RawFree.
// PyThread_tss_delete just calls TlsFree (on Windows) or pthread_key_delete (on *NIX).
// Neither of those have anything to do with CPython internals. PyMem_RawFree *requires*
// that the `tstate` be allocated with the CPython allocator.
PYBIND11_TLS_FREE(tstate);
}
};
/// Additional type information which does not fit into the PyTypeObject.
/// Changes to this struct also require bumping `PYBIND11_INTERNALS_VERSION`.
struct type_info {
PyTypeObject *type;
const std::type_info *cpptype;
size_t type_size, type_align, holder_size_in_ptrs;
void *(*operator_new)(size_t);
void (*init_instance)(instance *, const void *);
void (*dealloc)(value_and_holder &v_h);
std::vector<PyObject *(*) (PyObject *, PyTypeObject *)> implicit_conversions;
std::vector<std::pair<const std::type_info *, void *(*) (void *)>> implicit_casts;
std::vector<bool (*)(PyObject *, void *&)> *direct_conversions;
buffer_info *(*get_buffer)(PyObject *, void *) = nullptr;
void *get_buffer_data = nullptr;
void *(*module_local_load)(PyObject *, const type_info *) = nullptr;
/* A simple type never occurs as a (direct or indirect) parent
* of a class that makes use of multiple inheritance.
* A type can be simple even if it has non-simple ancestors as long as it has no descendants.
*/
bool simple_type : 1;
/* True if there is no multiple inheritance in this type's inheritance tree */
bool simple_ancestors : 1;
/* for base vs derived holder_type checks */
bool default_holder : 1;
/* true if this is a type registered with py::module_local */
bool module_local : 1;
};
/// On MSVC, debug and release builds are not ABI-compatible!
#if defined(_MSC_VER) && defined(_DEBUG)
# define PYBIND11_BUILD_TYPE "_debug"
#else
# define PYBIND11_BUILD_TYPE ""
#endif
/// Let's assume that different compilers are ABI-incompatible.
/// A user can manually set this string if they know their
/// compiler is compatible.
#ifndef PYBIND11_COMPILER_TYPE
# if defined(_MSC_VER)
# define PYBIND11_COMPILER_TYPE "_msvc"
# elif defined(__INTEL_COMPILER)
# define PYBIND11_COMPILER_TYPE "_icc"
# elif defined(__clang__)
# define PYBIND11_COMPILER_TYPE "_clang"
# elif defined(__PGI)
# define PYBIND11_COMPILER_TYPE "_pgi"
# elif defined(__MINGW32__)
# define PYBIND11_COMPILER_TYPE "_mingw"
# elif defined(__CYGWIN__)
# define PYBIND11_COMPILER_TYPE "_gcc_cygwin"
# elif defined(__GNUC__)
# define PYBIND11_COMPILER_TYPE "_gcc"
# else
# define PYBIND11_COMPILER_TYPE "_unknown"
# endif
#endif
/// Also standard libs
#ifndef PYBIND11_STDLIB
# if defined(_LIBCPP_VERSION)
# define PYBIND11_STDLIB "_libcpp"
# elif defined(__GLIBCXX__) || defined(__GLIBCPP__)
# define PYBIND11_STDLIB "_libstdcpp"
# else
# define PYBIND11_STDLIB ""
# endif
#endif
/// On Linux/OSX, changes in __GXX_ABI_VERSION__ indicate ABI incompatibility.
/// On MSVC, changes in _MSC_VER may indicate ABI incompatibility (#2898).
#ifndef PYBIND11_BUILD_ABI
# if defined(__GXX_ABI_VERSION)
# define PYBIND11_BUILD_ABI "_cxxabi" PYBIND11_TOSTRING(__GXX_ABI_VERSION)
# elif defined(_MSC_VER)
# define PYBIND11_BUILD_ABI "_mscver" PYBIND11_TOSTRING(_MSC_VER)
# else
# define PYBIND11_BUILD_ABI ""
# endif
#endif
#ifndef PYBIND11_INTERNALS_KIND
# define PYBIND11_INTERNALS_KIND ""
#endif
#define PYBIND11_PLATFORM_ABI_ID \
PYBIND11_INTERNALS_KIND PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI \
PYBIND11_BUILD_TYPE
#define PYBIND11_INTERNALS_ID \
"__pybind11_internals_v" PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) \
PYBIND11_PLATFORM_ABI_ID "__"
#define PYBIND11_MODULE_LOCAL_ID \
"__pybind11_module_local_v" PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) \
PYBIND11_PLATFORM_ABI_ID "__"
/// Each module locally stores a pointer to the `internals` data. The data
/// itself is shared among modules with the same `PYBIND11_INTERNALS_ID`.
inline internals **&get_internals_pp() {
static internals **internals_pp = nullptr;
return internals_pp;
}
// forward decl
inline void translate_exception(std::exception_ptr);
template <class T,
enable_if_t<std::is_same<std::nested_exception, remove_cvref_t<T>>::value, int> = 0>
bool handle_nested_exception(const T &exc, const std::exception_ptr &p) {
std::exception_ptr nested = exc.nested_ptr();
if (nested != nullptr && nested != p) {
translate_exception(nested);
return true;
}
return false;
}
template <class T,
enable_if_t<!std::is_same<std::nested_exception, remove_cvref_t<T>>::value, int> = 0>
bool handle_nested_exception(const T &exc, const std::exception_ptr &p) {
if (const auto *nep = dynamic_cast<const std::nested_exception *>(std::addressof(exc))) {
return handle_nested_exception(*nep, p);
}
return false;
}
inline bool raise_err(PyObject *exc_type, const char *msg) {
if (PyErr_Occurred()) {
raise_from(exc_type, msg);
return true;
}
set_error(exc_type, msg);
return false;
}
inline void translate_exception(std::exception_ptr p) {
if (!p) {
return;
}
try {
std::rethrow_exception(p);
} catch (error_already_set &e) {
handle_nested_exception(e, p);
e.restore();
return;
} catch (const builtin_exception &e) {
// Could not use template since it's an abstract class.
if (const auto *nep = dynamic_cast<const std::nested_exception *>(std::addressof(e))) {
handle_nested_exception(*nep, p);
}
e.set_error();
return;
} catch (const std::bad_alloc &e) {
handle_nested_exception(e, p);
raise_err(PyExc_MemoryError, e.what());
return;
} catch (const std::domain_error &e) {
handle_nested_exception(e, p);
raise_err(PyExc_ValueError, e.what());
return;
} catch (const std::invalid_argument &e) {
handle_nested_exception(e, p);
raise_err(PyExc_ValueError, e.what());
return;
} catch (const std::length_error &e) {
handle_nested_exception(e, p);
raise_err(PyExc_ValueError, e.what());
return;
} catch (const std::out_of_range &e) {
handle_nested_exception(e, p);
raise_err(PyExc_IndexError, e.what());
return;
} catch (const std::range_error &e) {
handle_nested_exception(e, p);
raise_err(PyExc_ValueError, e.what());
return;
} catch (const std::overflow_error &e) {
handle_nested_exception(e, p);
raise_err(PyExc_OverflowError, e.what());
return;
} catch (const std::exception &e) {
handle_nested_exception(e, p);
raise_err(PyExc_RuntimeError, e.what());
return;
} catch (const std::nested_exception &e) {
handle_nested_exception(e, p);
raise_err(PyExc_RuntimeError, "Caught an unknown nested exception!");
return;
} catch (...) {
raise_err(PyExc_RuntimeError, "Caught an unknown exception!");
return;
}
}
#if !defined(__GLIBCXX__)
inline void translate_local_exception(std::exception_ptr p) {
try {
if (p) {
std::rethrow_exception(p);
}
} catch (error_already_set &e) {
e.restore();
return;
} catch (const builtin_exception &e) {
e.set_error();
return;
}
}
#endif
inline object get_python_state_dict() {
object state_dict;
#if PYBIND11_INTERNALS_VERSION <= 4 || PY_VERSION_HEX < 0x03080000 || defined(PYPY_VERSION)
state_dict = reinterpret_borrow<object>(PyEval_GetBuiltins());
#else
# if PY_VERSION_HEX < 0x03090000
PyInterpreterState *istate = _PyInterpreterState_Get();
# else
PyInterpreterState *istate = PyInterpreterState_Get();
# endif
if (istate) {
state_dict = reinterpret_borrow<object>(PyInterpreterState_GetDict(istate));
}
#endif
if (!state_dict) {
raise_from(PyExc_SystemError, "pybind11::detail::get_python_state_dict() FAILED");
throw error_already_set();
}
return state_dict;
}
inline object get_internals_obj_from_state_dict(handle state_dict) {
return reinterpret_steal<object>(
dict_getitemstringref(state_dict.ptr(), PYBIND11_INTERNALS_ID));
}
inline internals **get_internals_pp_from_capsule(handle obj) {
void *raw_ptr = PyCapsule_GetPointer(obj.ptr(), /*name=*/nullptr);
if (raw_ptr == nullptr) {
raise_from(PyExc_SystemError, "pybind11::detail::get_internals_pp_from_capsule() FAILED");
throw error_already_set();
}
return static_cast<internals **>(raw_ptr);
}
inline uint64_t round_up_to_next_pow2(uint64_t x) {
// Round-up to the next power of two.
// See https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
x--;
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
x |= (x >> 32);
x++;
return x;
}
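// For example: round_up_to_next_pow2(1) == 1, round_up_to_next_pow2(3) == 4, and
// round_up_to_next_pow2(64) == 64 (values that are already powers of two are unchanged).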
/// Return a reference to the current `internals` data
PYBIND11_NOINLINE internals &get_internals() {
auto **&internals_pp = get_internals_pp();
if (internals_pp && *internals_pp) {
return **internals_pp;
}
#if defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
gil_scoped_acquire gil;
#else
// Ensure that the GIL is held since we will need to make Python calls.
// Cannot use py::gil_scoped_acquire here since that constructor calls get_internals.
struct gil_scoped_acquire_local {
gil_scoped_acquire_local() : state(PyGILState_Ensure()) {}
gil_scoped_acquire_local(const gil_scoped_acquire_local &) = delete;
gil_scoped_acquire_local &operator=(const gil_scoped_acquire_local &) = delete;
~gil_scoped_acquire_local() { PyGILState_Release(state); }
const PyGILState_STATE state;
} gil;
#endif
error_scope err_scope;
dict state_dict = get_python_state_dict();
if (object internals_obj = get_internals_obj_from_state_dict(state_dict)) {
internals_pp = get_internals_pp_from_capsule(internals_obj);
}
if (internals_pp && *internals_pp) {
// We loaded the internals through `state_dict`, which means that our `error_already_set`
// and `builtin_exception` may be different local classes than the ones set up in the
// initial exception translator, below, so add another for our local exception classes.
//
// libstdc++ doesn't require this (types there are identified only by name)
// libc++ with CPython doesn't require this (types are explicitly exported)
        // libc++ with PyPy still needs it, pending further investigation
#if !defined(__GLIBCXX__)
(*internals_pp)->registered_exception_translators.push_front(&translate_local_exception);
#endif
} else {
if (!internals_pp) {
internals_pp = new internals *();
}
auto *&internals_ptr = *internals_pp;
internals_ptr = new internals();
PyThreadState *tstate = PyThreadState_Get();
// NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
if (!PYBIND11_TLS_KEY_CREATE(internals_ptr->tstate)) {
pybind11_fail("get_internals: could not successfully initialize the tstate TSS key!");
}
PYBIND11_TLS_REPLACE_VALUE(internals_ptr->tstate, tstate);
#if PYBIND11_INTERNALS_VERSION > 4
// NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
if (!PYBIND11_TLS_KEY_CREATE(internals_ptr->loader_life_support_tls_key)) {
pybind11_fail("get_internals: could not successfully initialize the "
"loader_life_support TSS key!");
}
#endif
internals_ptr->istate = tstate->interp;
state_dict[PYBIND11_INTERNALS_ID] = capsule(reinterpret_cast<void *>(internals_pp));
internals_ptr->registered_exception_translators.push_front(&translate_exception);
internals_ptr->static_property_type = make_static_property_type();
internals_ptr->default_metaclass = make_default_metaclass();
internals_ptr->instance_base = make_object_base_type(internals_ptr->default_metaclass);
#ifdef Py_GIL_DISABLED
// Scale proportional to the number of cores. 2x is a heuristic to reduce contention.
auto num_shards
= static_cast<size_t>(round_up_to_next_pow2(2 * std::thread::hardware_concurrency()));
if (num_shards == 0) {
num_shards = 1;
}
internals_ptr->instance_shards.reset(new instance_map_shard[num_shards]);
internals_ptr->instance_shards_mask = num_shards - 1;
#endif // Py_GIL_DISABLED
}
return **internals_pp;
}
// the internals struct (above) is shared between all the modules. local_internals are only
// for a single module. Any changes made to internals may require an update to
// PYBIND11_INTERNALS_VERSION, breaking backwards compatibility. local_internals is, by design,
// restricted to a single module. Whether a module has local internals or not should not
// impact any other modules, because the only thing accessing the local internals is the
// module that contains them.
struct local_internals {
type_map<type_info *> registered_types_cpp;
std::forward_list<ExceptionTranslator> registered_exception_translators;
#if PYBIND11_INTERNALS_VERSION == 4
// For ABI compatibility, we can't store the loader_life_support TLS key in
// the `internals` struct directly. Instead, we store it in `shared_data` and
// cache a copy in `local_internals`. If we allocated a separate TLS key for
// each instance of `local_internals`, we could end up allocating hundreds of
// TLS keys if hundreds of different pybind11 modules are loaded (which is a
// plausible number).
PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)
// Holds the shared TLS key for the loader_life_support stack.
struct shared_loader_life_support_data {
PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)
shared_loader_life_support_data() {
// NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
if (!PYBIND11_TLS_KEY_CREATE(loader_life_support_tls_key)) {
pybind11_fail("local_internals: could not successfully initialize the "
"loader_life_support TLS key!");
}
}
// We can't help but leak the TLS key, because Python never unloads extension modules.
};
local_internals() {
auto &internals = get_internals();
// Get or create the `loader_life_support_stack_key`.
auto &ptr = internals.shared_data["_life_support"];
if (!ptr) {
ptr = new shared_loader_life_support_data;
}
loader_life_support_tls_key
= static_cast<shared_loader_life_support_data *>(ptr)->loader_life_support_tls_key;
}
#endif // PYBIND11_INTERNALS_VERSION == 4
};
/// Works like `get_internals`, but for things which are locally registered.
inline local_internals &get_local_internals() {
    // This static can be created during the interpreter finalization routine. If it were
    // destroyed in the destructor of another static variable, re-creating it at that point
    // would cause the static deinitialization fiasco. To avoid that, we deliberately never
    // destroy the local_internals static. More about the problem and the current solution:
    // https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables
static auto *locals = new local_internals();
return *locals;
}
#ifdef Py_GIL_DISABLED
# define PYBIND11_LOCK_INTERNALS(internals) std::unique_lock<pymutex> lock((internals).mutex)
#else
# define PYBIND11_LOCK_INTERNALS(internals)
#endif
template <typename F>
inline auto with_internals(const F &cb) -> decltype(cb(get_internals())) {
auto &internals = get_internals();
PYBIND11_LOCK_INTERNALS(internals);
return cb(internals);
}
inline std::uint64_t mix64(std::uint64_t z) {
// David Stafford's variant 13 of the MurmurHash3 finalizer popularized
// by the SplitMix PRNG.
// https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html
z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
return z ^ (z >> 31);
}
template <typename F>
inline auto with_instance_map(const void *ptr,
const F &cb) -> decltype(cb(std::declval<instance_map &>())) {
auto &internals = get_internals();
#ifdef Py_GIL_DISABLED
// Hash address to compute shard, but ignore low bits. We'd like allocations
// from the same thread/core to map to the same shard and allocations from
// other threads/cores to map to other shards. Using the high bits is a good
// heuristic because memory allocators often have a per-thread
// arena/superblock/segment from which smaller allocations are served.
auto addr = reinterpret_cast<std::uintptr_t>(ptr);
auto hash = mix64(static_cast<std::uint64_t>(addr >> 20));
auto idx = static_cast<size_t>(hash & internals.instance_shards_mask);
auto &shard = internals.instance_shards[idx];
std::unique_lock<pymutex> lock(shard.mutex);
return cb(shard.registered_instances);
#else
(void) ptr;
return cb(internals.registered_instances);
#endif
}
// Returns the number of registered instances for testing purposes. The result may not be
// consistent if other threads are registering or unregistering instances concurrently.
inline size_t num_registered_instances() {
auto &internals = get_internals();
#ifdef Py_GIL_DISABLED
size_t count = 0;
for (size_t i = 0; i <= internals.instance_shards_mask; ++i) {
auto &shard = internals.instance_shards[i];
std::unique_lock<pymutex> lock(shard.mutex);
count += shard.registered_instances.size();
}
return count;
#else
return internals.registered_instances.size();
#endif
}
/// Constructs a std::string with the given arguments, stores it in `internals`, and returns its
/// `c_str()`. Such string objects have a long storage duration -- the internal strings are only
/// cleared when the program exits or after interpreter shutdown (when embedding), and so are
/// suitable for c-style strings needed by Python internals (such as PyTypeObject's tp_name).
template <typename... Args>
const char *c_str(Args &&...args) {
// GCC 4.8 doesn't like parameter unpack within lambda capture, so use
// PYBIND11_LOCK_INTERNALS.
auto &internals = get_internals();
PYBIND11_LOCK_INTERNALS(internals);
auto &strings = internals.static_strings;
strings.emplace_front(std::forward<Args>(args)...);
return strings.front().c_str();
}
inline const char *get_function_record_capsule_name() {
#if PYBIND11_INTERNALS_VERSION > 4
return get_internals().function_record_capsule_name.c_str();
#else
return nullptr;
#endif
}
// Determine whether or not the given capsule contains a pybind11 function record.
// Note that we use `internals` to make sure that only ABI compatible records are touched.
//
// This check is currently used in two places:
// - An important optimization in functional.h to avoid overhead in C++ -> Python -> C++
// - The sibling feature of cpp_function to allow overloads
inline bool is_function_record_capsule(const capsule &cap) {
// Pointer equality as we rely on internals() to ensure unique pointers
return cap.name() == get_function_record_capsule_name();
}
PYBIND11_NAMESPACE_END(detail)
/// Returns a named pointer that is shared among all extension modules (using the same
/// pybind11 version) running in the current interpreter. Names starting with underscores
/// are reserved for internal usage. Returns `nullptr` if no matching entry was found.
PYBIND11_NOINLINE void *get_shared_data(const std::string &name) {
return detail::with_internals([&](detail::internals &internals) {
auto it = internals.shared_data.find(name);
return it != internals.shared_data.end() ? it->second : nullptr;
});
}
/// Set the shared data that can be later recovered by `get_shared_data()`.
PYBIND11_NOINLINE void *set_shared_data(const std::string &name, void *data) {
return detail::with_internals([&](detail::internals &internals) {
internals.shared_data[name] = data;
return data;
});
}
/// Returns a typed reference to a shared data entry (by using `get_shared_data()`) if
/// such entry exists. Otherwise, a new object of default-constructible type `T` is
/// added to the shared data under the given name and a reference to it is returned.
template <typename T>
T &get_or_create_shared_data(const std::string &name) {
return *detail::with_internals([&](detail::internals &internals) {
auto it = internals.shared_data.find(name);
T *ptr = (T *) (it != internals.shared_data.end() ? it->second : nullptr);
if (!ptr) {
ptr = new T();
internals.shared_data[name] = ptr;
}
return ptr;
});
}
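// Usage sketch (hypothetical name and type; assumes `namespace py = pybind11`): a counter
// shared across all extension modules built with a compatible pybind11 ABI:
//
//     auto &counter = py::get_or_create_shared_data<std::size_t>("my_ext_counter");
//     ++counter;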
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
========================================================================================================================================
SOURCE CODE FILE: type_caster_base.h
LINES: 1
SIZE: 48.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\type_caster_base.h
ENCODING: utf-8
```h
/*
pybind11/detail/type_caster_base.h (originally first part of pybind11/cast.h)
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <pybind11/pytypes.h>
#include "common.h"
#include "cpp_conduit.h"
#include "descr.h"
#include "internals.h"
#include "typeid.h"
#include "value_and_holder.h"
#include <cstdint>
#include <cstring>
#include <iterator>
#include <new>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include <utility>
#include <vector>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// A life support system for temporary objects created by `type_caster::load()`.
/// Adding a patient will keep it alive up until the enclosing function returns.
class loader_life_support {
private:
loader_life_support *parent = nullptr;
std::unordered_set<PyObject *> keep_alive;
// Store stack pointer in thread-local storage.
static PYBIND11_TLS_KEY_REF get_stack_tls_key() {
#if PYBIND11_INTERNALS_VERSION == 4
return get_local_internals().loader_life_support_tls_key;
#else
return get_internals().loader_life_support_tls_key;
#endif
}
static loader_life_support *get_stack_top() {
return static_cast<loader_life_support *>(PYBIND11_TLS_GET_VALUE(get_stack_tls_key()));
}
static void set_stack_top(loader_life_support *value) {
PYBIND11_TLS_REPLACE_VALUE(get_stack_tls_key(), value);
}
public:
/// A new patient frame is created when a function is entered
loader_life_support() : parent{get_stack_top()} { set_stack_top(this); }
/// ... and destroyed after it returns
~loader_life_support() {
if (get_stack_top() != this) {
pybind11_fail("loader_life_support: internal error");
}
set_stack_top(parent);
for (auto *item : keep_alive) {
Py_DECREF(item);
}
}
/// This can only be used inside a pybind11-bound function, either by `argument_loader`
/// at argument preparation time or by `py::cast()` at execution time.
PYBIND11_NOINLINE static void add_patient(handle h) {
loader_life_support *frame = get_stack_top();
if (!frame) {
            // NOTE: It would be nice to include the stack frames here, as this indicates
            // use of pybind11::cast<> outside the normal call framework; finding such
            // a location is challenging. Developers could consider printing out
// stack frame addresses here using something like __builtin_frame_address(0)
throw cast_error("When called outside a bound function, py::cast() cannot "
"do Python -> C++ conversions which require the creation "
"of temporary values");
}
if (frame->keep_alive.insert(h.ptr()).second) {
Py_INCREF(h.ptr());
}
}
};
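// Sketch of the intended lifecycle (simplified): the function dispatcher pushes a frame before
// loading arguments, casters register temporaries as patients, and everything is released once
// the call returns:
//
//     {
//         loader_life_support frame;              // pushed on entry to a bound function
//         loader_life_support::add_patient(tmp);  // temporary created by a type_caster::load()
//         // ... invoke the bound C++ function; `tmp` stays alive throughout ...
//     }                                           // destructor pops the frame, releases patients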
// Gets the cache entry for the given type, creating it if necessary. The return value is the pair
// returned by emplace, i.e. an iterator for the entry and a bool set to `true` if the entry was
// just created.
inline std::pair<decltype(internals::registered_types_py)::iterator, bool>
all_type_info_get_cache(PyTypeObject *type);
// Band-aid workaround to fix a subtle but serious bug in a minimalistic fashion. See PR #4762.
inline void all_type_info_add_base_most_derived_first(std::vector<type_info *> &bases,
type_info *addl_base) {
for (auto it = bases.begin(); it != bases.end(); it++) {
type_info *existing_base = *it;
if (PyType_IsSubtype(addl_base->type, existing_base->type) != 0) {
bases.insert(it, addl_base);
return;
}
}
bases.push_back(addl_base);
}
// Populates a just-created cache entry.
PYBIND11_NOINLINE void all_type_info_populate(PyTypeObject *t, std::vector<type_info *> &bases) {
assert(bases.empty());
std::vector<PyTypeObject *> check;
for (handle parent : reinterpret_borrow<tuple>(t->tp_bases)) {
check.push_back((PyTypeObject *) parent.ptr());
}
auto const &type_dict = get_internals().registered_types_py;
for (size_t i = 0; i < check.size(); i++) {
auto *type = check[i];
// Ignore Python2 old-style class super type:
if (!PyType_Check((PyObject *) type)) {
continue;
}
// Check `type` in the current set of registered python types:
auto it = type_dict.find(type);
if (it != type_dict.end()) {
// We found a cache entry for it, so it's either pybind-registered or has pre-computed
// pybind bases, but we have to make sure we haven't already seen the type(s) before:
// we want to follow Python/virtual C++ rules that there should only be one instance of
// a common base.
for (auto *tinfo : it->second) {
// NB: Could use a second set here, rather than doing a linear search, but since
// having a large number of immediate pybind11-registered types seems fairly
// unlikely, that probably isn't worthwhile.
bool found = false;
for (auto *known : bases) {
if (known == tinfo) {
found = true;
break;
}
}
if (!found) {
all_type_info_add_base_most_derived_first(bases, tinfo);
}
}
} else if (type->tp_bases) {
            // It's some Python type, so keep following its base classes to look for one or
            // more registered types
if (i + 1 == check.size()) {
// When we're at the end, we can pop off the current element to avoid growing
// `check` when adding just one base (which is typical--i.e. when there is no
// multiple inheritance)
check.pop_back();
i--;
}
for (handle parent : reinterpret_borrow<tuple>(type->tp_bases)) {
check.push_back((PyTypeObject *) parent.ptr());
}
}
}
}
/**
* Extracts vector of type_info pointers of pybind-registered roots of the given Python type. Will
* be just 1 pybind type for the Python type of a pybind-registered class, or for any Python-side
* derived class that uses single inheritance. Will contain as many types as required for a Python
* class that uses multiple inheritance to inherit (directly or indirectly) from multiple
* pybind-registered classes. Will be empty if neither the type nor any base classes are
* pybind-registered.
*
* The value is cached for the lifetime of the Python type.
*/
inline const std::vector<detail::type_info *> &all_type_info(PyTypeObject *type) {
auto ins = all_type_info_get_cache(type);
if (ins.second) {
// New cache entry: populate it
all_type_info_populate(type, ins.first->second);
}
return ins.first->second;
}
/**
* Gets a single pybind11 type info for a python type. Returns nullptr if neither the type nor any
* ancestors are pybind11-registered. Throws an exception if there are multiple bases--use
* `all_type_info` instead if you want to support multiple bases.
*/
PYBIND11_NOINLINE detail::type_info *get_type_info(PyTypeObject *type) {
const auto &bases = all_type_info(type);
if (bases.empty()) {
return nullptr;
}
if (bases.size() > 1) {
pybind11_fail(
"pybind11::detail::get_type_info: type has multiple pybind11-registered bases");
}
return bases.front();
}
inline detail::type_info *get_local_type_info(const std::type_index &tp) {
auto &locals = get_local_internals().registered_types_cpp;
auto it = locals.find(tp);
if (it != locals.end()) {
return it->second;
}
return nullptr;
}
inline detail::type_info *get_global_type_info(const std::type_index &tp) {
return with_internals([&](internals &internals) {
detail::type_info *type_info = nullptr;
auto &types = internals.registered_types_cpp;
auto it = types.find(tp);
if (it != types.end()) {
type_info = it->second;
}
return type_info;
});
}
/// Return the type info for a given C++ type; on lookup failure can either throw or return
/// nullptr.
PYBIND11_NOINLINE detail::type_info *get_type_info(const std::type_index &tp,
bool throw_if_missing = false) {
if (auto *ltype = get_local_type_info(tp)) {
return ltype;
}
if (auto *gtype = get_global_type_info(tp)) {
return gtype;
}
if (throw_if_missing) {
std::string tname = tp.name();
detail::clean_type_id(tname);
pybind11_fail("pybind11::detail::get_type_info: unable to find type info for \""
+ std::move(tname) + '"');
}
return nullptr;
}
PYBIND11_NOINLINE handle get_type_handle(const std::type_info &tp, bool throw_if_missing) {
detail::type_info *type_info = get_type_info(tp, throw_if_missing);
return handle(type_info ? ((PyObject *) type_info->type) : nullptr);
}
// Searches the inheritance graph for a registered Python instance, using all_type_info().
PYBIND11_NOINLINE handle find_registered_python_instance(void *src,
const detail::type_info *tinfo) {
return with_instance_map(src, [&](instance_map &instances) {
auto it_instances = instances.equal_range(src);
for (auto it_i = it_instances.first; it_i != it_instances.second; ++it_i) {
for (auto *instance_type : detail::all_type_info(Py_TYPE(it_i->second))) {
if (instance_type && same_type(*instance_type->cpptype, *tinfo->cpptype)) {
return handle((PyObject *) it_i->second).inc_ref();
}
}
}
return handle();
});
}
// Container for accessing and iterating over an instance's values/holders
struct values_and_holders {
private:
instance *inst;
using type_vec = std::vector<detail::type_info *>;
const type_vec &tinfo;
public:
explicit values_and_holders(instance *inst)
: inst{inst}, tinfo(all_type_info(Py_TYPE(inst))) {}
explicit values_and_holders(PyObject *obj)
: inst{nullptr}, tinfo(all_type_info(Py_TYPE(obj))) {
if (!tinfo.empty()) {
inst = reinterpret_cast<instance *>(obj);
}
}
struct iterator {
private:
instance *inst = nullptr;
const type_vec *types = nullptr;
value_and_holder curr;
friend struct values_and_holders;
iterator(instance *inst, const type_vec *tinfo) : inst{inst}, types{tinfo} {
if (inst != nullptr) {
assert(!types->empty());
curr = value_and_holder(
inst /* instance */,
(*types)[0] /* type info */,
0, /* vpos: (non-simple types only): the first vptr comes first */
0 /* index */);
}
}
// Past-the-end iterator:
explicit iterator(size_t end) : curr(end) {}
public:
bool operator==(const iterator &other) const { return curr.index == other.curr.index; }
bool operator!=(const iterator &other) const { return curr.index != other.curr.index; }
iterator &operator++() {
if (!inst->simple_layout) {
curr.vh += 1 + (*types)[curr.index]->holder_size_in_ptrs;
}
++curr.index;
curr.type = curr.index < types->size() ? (*types)[curr.index] : nullptr;
return *this;
}
value_and_holder &operator*() { return curr; }
value_and_holder *operator->() { return &curr; }
};
iterator begin() { return iterator(inst, &tinfo); }
iterator end() { return iterator(tinfo.size()); }
iterator find(const type_info *find_type) {
auto it = begin(), endit = end();
while (it != endit && it->type != find_type) {
++it;
}
return it;
}
size_t size() { return tinfo.size(); }
// Band-aid workaround to fix a subtle but serious bug in a minimalistic fashion. See PR #4762.
bool is_redundant_value_and_holder(const value_and_holder &vh) {
for (size_t i = 0; i < vh.index; i++) {
if (PyType_IsSubtype(tinfo[i]->type, tinfo[vh.index]->type) != 0) {
return true;
}
}
return false;
}
};
/**
* Extracts C++ value and holder pointer references from an instance (which may contain multiple
* values/holders for python-side multiple inheritance) that match the given type. Throws an error
* if the given type (or ValueType, if omitted) is not a pybind11 base of the given instance. If
* `find_type` is omitted (or explicitly specified as nullptr) the first value/holder are returned,
* regardless of type (and the resulting .type will be nullptr).
*
* The returned object should be short-lived: in particular, it must not outlive the called-upon
* instance.
*/
PYBIND11_NOINLINE value_and_holder
instance::get_value_and_holder(const type_info *find_type /*= nullptr default in common.h*/,
bool throw_if_missing /*= true in common.h*/) {
// Optimize common case:
if (!find_type || Py_TYPE(this) == find_type->type) {
return value_and_holder(this, find_type, 0, 0);
}
detail::values_and_holders vhs(this);
auto it = vhs.find(find_type);
if (it != vhs.end()) {
return *it;
}
if (!throw_if_missing) {
return value_and_holder();
}
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
pybind11_fail("pybind11::detail::instance::get_value_and_holder: `"
+ get_fully_qualified_tp_name(find_type->type)
+ "' is not a pybind11 base of the given `"
+ get_fully_qualified_tp_name(Py_TYPE(this)) + "' instance");
#else
pybind11_fail(
"pybind11::detail::instance::get_value_and_holder: "
"type is not a pybind11 base of the given instance "
"(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for type details)");
#endif
}
PYBIND11_NOINLINE void instance::allocate_layout() {
const auto &tinfo = all_type_info(Py_TYPE(this));
const size_t n_types = tinfo.size();
if (n_types == 0) {
pybind11_fail(
"instance allocation failed: new instance has no pybind11-registered base types");
}
simple_layout
= n_types == 1 && tinfo.front()->holder_size_in_ptrs <= instance_simple_holder_in_ptrs();
// Simple path: no python-side multiple inheritance, and a small-enough holder
if (simple_layout) {
simple_value_holder[0] = nullptr;
simple_holder_constructed = false;
simple_instance_registered = false;
} else { // multiple base types or a too-large holder
// Allocate space to hold: [v1*][h1][v2*][h2]...[bb...] where [vN*] is a value pointer,
// [hN] is the (uninitialized) holder instance for value N, and [bb...] is a set of bool
// values that tracks whether each associated holder has been initialized. Each [block] is
// padded, if necessary, to an integer multiple of sizeof(void *).
size_t space = 0;
for (auto *t : tinfo) {
space += 1; // value pointer
space += t->holder_size_in_ptrs; // holder instance
}
size_t flags_at = space;
space += size_in_ptrs(n_types); // status bytes (holder_constructed and
// instance_registered)
// Allocate space for flags, values, and holders, and initialize it to 0 (flags and values,
// in particular, need to be 0). Use Python's memory allocation
// functions: Python is using pymalloc, which is designed to be
// efficient for small allocations like the one we're doing here;
// for larger allocations they are just wrappers around malloc.
// TODO: is this still true for pure Python 3.6?
nonsimple.values_and_holders = (void **) PyMem_Calloc(space, sizeof(void *));
if (!nonsimple.values_and_holders) {
throw std::bad_alloc();
}
nonsimple.status
= reinterpret_cast<std::uint8_t *>(&nonsimple.values_and_holders[flags_at]);
}
owned = true;
}
// NOLINTNEXTLINE(readability-make-member-function-const)
PYBIND11_NOINLINE void instance::deallocate_layout() {
if (!simple_layout) {
PyMem_Free(reinterpret_cast<void *>(nonsimple.values_and_holders));
}
}
PYBIND11_NOINLINE bool isinstance_generic(handle obj, const std::type_info &tp) {
handle type = detail::get_type_handle(tp, false);
if (!type) {
return false;
}
return isinstance(obj, type);
}
PYBIND11_NOINLINE handle get_object_handle(const void *ptr, const detail::type_info *type) {
return with_instance_map(ptr, [&](instance_map &instances) {
auto range = instances.equal_range(ptr);
for (auto it = range.first; it != range.second; ++it) {
for (const auto &vh : values_and_holders(it->second)) {
if (vh.type == type) {
return handle((PyObject *) it->second);
}
}
}
return handle();
});
}
inline PyThreadState *get_thread_state_unchecked() {
#if defined(PYPY_VERSION)
return PyThreadState_GET();
#elif PY_VERSION_HEX < 0x030D0000
return _PyThreadState_UncheckedGet();
#else
return PyThreadState_GetUnchecked();
#endif
}
// Forward declarations
void keep_alive_impl(handle nurse, handle patient);
inline PyObject *make_new_instance(PyTypeObject *type);
class type_caster_generic {
public:
PYBIND11_NOINLINE explicit type_caster_generic(const std::type_info &type_info)
: typeinfo(get_type_info(type_info)), cpptype(&type_info) {}
explicit type_caster_generic(const type_info *typeinfo)
: typeinfo(typeinfo), cpptype(typeinfo ? typeinfo->cpptype : nullptr) {}
bool load(handle src, bool convert) { return load_impl<type_caster_generic>(src, convert); }
PYBIND11_NOINLINE static handle cast(const void *_src,
return_value_policy policy,
handle parent,
const detail::type_info *tinfo,
void *(*copy_constructor)(const void *),
void *(*move_constructor)(const void *),
const void *existing_holder = nullptr) {
if (!tinfo) { // no type info: error will be set already
return handle();
}
void *src = const_cast<void *>(_src);
if (src == nullptr) {
return none().release();
}
if (handle registered_inst = find_registered_python_instance(src, tinfo)) {
return registered_inst;
}
auto inst = reinterpret_steal<object>(make_new_instance(tinfo->type));
auto *wrapper = reinterpret_cast<instance *>(inst.ptr());
wrapper->owned = false;
void *&valueptr = values_and_holders(wrapper).begin()->value_ptr();
switch (policy) {
case return_value_policy::automatic:
case return_value_policy::take_ownership:
valueptr = src;
wrapper->owned = true;
break;
case return_value_policy::automatic_reference:
case return_value_policy::reference:
valueptr = src;
wrapper->owned = false;
break;
case return_value_policy::copy:
if (copy_constructor) {
valueptr = copy_constructor(src);
} else {
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
std::string type_name(tinfo->cpptype->name());
detail::clean_type_id(type_name);
throw cast_error("return_value_policy = copy, but type " + type_name
+ " is non-copyable!");
#else
throw cast_error("return_value_policy = copy, but type is "
"non-copyable! (#define PYBIND11_DETAILED_ERROR_MESSAGES or "
"compile in debug mode for details)");
#endif
}
wrapper->owned = true;
break;
case return_value_policy::move:
if (move_constructor) {
valueptr = move_constructor(src);
} else if (copy_constructor) {
valueptr = copy_constructor(src);
} else {
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
std::string type_name(tinfo->cpptype->name());
detail::clean_type_id(type_name);
throw cast_error("return_value_policy = move, but type " + type_name
+ " is neither movable nor copyable!");
#else
throw cast_error("return_value_policy = move, but type is neither "
"movable nor copyable! "
"(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in "
"debug mode for details)");
#endif
}
wrapper->owned = true;
break;
case return_value_policy::reference_internal:
valueptr = src;
wrapper->owned = false;
keep_alive_impl(inst, parent);
break;
default:
throw cast_error("unhandled return_value_policy: should not happen!");
}
tinfo->init_instance(wrapper, existing_holder);
return inst.release();
}
    // Base methods for generic caster; these are overridden in copyable_holder_caster
void load_value(value_and_holder &&v_h) {
auto *&vptr = v_h.value_ptr();
// Lazy allocation for unallocated values:
if (vptr == nullptr) {
const auto *type = v_h.type ? v_h.type : typeinfo;
if (type->operator_new) {
vptr = type->operator_new(type->type_size);
} else {
#if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
if (type->type_align > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
vptr = ::operator new(type->type_size, std::align_val_t(type->type_align));
} else {
vptr = ::operator new(type->type_size);
}
#else
vptr = ::operator new(type->type_size);
#endif
}
}
value = vptr;
}
bool try_implicit_casts(handle src, bool convert) {
for (const auto &cast : typeinfo->implicit_casts) {
type_caster_generic sub_caster(*cast.first);
if (sub_caster.load(src, convert)) {
value = cast.second(sub_caster.value);
return true;
}
}
return false;
}
bool try_direct_conversions(handle src) {
for (auto &converter : *typeinfo->direct_conversions) {
if (converter(src.ptr(), value)) {
return true;
}
}
return false;
}
bool try_cpp_conduit(handle src) {
value = try_raw_pointer_ephemeral_from_cpp_conduit(src, cpptype);
if (value != nullptr) {
return true;
}
return false;
}
void check_holder_compat() {}
PYBIND11_NOINLINE static void *local_load(PyObject *src, const type_info *ti) {
auto caster = type_caster_generic(ti);
if (caster.load(src, false)) {
return caster.value;
}
return nullptr;
}
/// Try to load with foreign typeinfo, if available. Used when there is no
/// native typeinfo, or when the native one wasn't able to produce a value.
PYBIND11_NOINLINE bool try_load_foreign_module_local(handle src) {
constexpr auto *local_key = PYBIND11_MODULE_LOCAL_ID;
const auto pytype = type::handle_of(src);
if (!hasattr(pytype, local_key)) {
return false;
}
type_info *foreign_typeinfo = reinterpret_borrow<capsule>(getattr(pytype, local_key));
        // Only consider this foreign loader if it is actually foreign and is a loader of the
        // correct C++ type
if (foreign_typeinfo->module_local_load == &local_load
|| (cpptype && !same_type(*cpptype, *foreign_typeinfo->cpptype))) {
return false;
}
if (auto *result = foreign_typeinfo->module_local_load(src.ptr(), foreign_typeinfo)) {
value = result;
return true;
}
return false;
}
// Implementation of `load`; this takes the type of `this` so that it can dispatch the relevant
// bits of code between here and copyable_holder_caster where the two classes need different
// logic (without having to resort to virtual inheritance).
template <typename ThisT>
PYBIND11_NOINLINE bool load_impl(handle src, bool convert) {
if (!src) {
return false;
}
if (!typeinfo) {
return try_load_foreign_module_local(src);
}
auto &this_ = static_cast<ThisT &>(*this);
this_.check_holder_compat();
PyTypeObject *srctype = Py_TYPE(src.ptr());
// Case 1: If src is an exact type match for the target type then we can reinterpret_cast
// the instance's value pointer to the target type:
if (srctype == typeinfo->type) {
this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());
return true;
}
// Case 2: We have a derived class
if (PyType_IsSubtype(srctype, typeinfo->type)) {
const auto &bases = all_type_info(srctype);
bool no_cpp_mi = typeinfo->simple_type;
// Case 2a: the python type is a Python-inherited derived class that inherits from just
// one simple (no MI) pybind11 class, or is an exact match, so the C++ instance is of
// the right type and we can use reinterpret_cast.
// (This is essentially the same as case 2b, but because not using multiple inheritance
// is extremely common, we handle it specially to avoid the loop iterator and type
// pointer lookup overhead)
if (bases.size() == 1 && (no_cpp_mi || bases.front()->type == typeinfo->type)) {
this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());
return true;
}
// Case 2b: the python type inherits from multiple C++ bases. Check the bases to see
// if we can find an exact match (or, for a simple C++ type, an inherited match); if
// so, we can safely reinterpret_cast to the relevant pointer.
if (bases.size() > 1) {
for (auto *base : bases) {
if (no_cpp_mi ? PyType_IsSubtype(base->type, typeinfo->type)
: base->type == typeinfo->type) {
this_.load_value(
reinterpret_cast<instance *>(src.ptr())->get_value_and_holder(base));
return true;
}
}
}
// Case 2c: C++ multiple inheritance is involved and we couldn't find an exact type
// match in the registered bases, above, so try implicit casting (needed for proper C++
// casting when MI is involved).
if (this_.try_implicit_casts(src, convert)) {
return true;
}
}
// Perform an implicit conversion
if (convert) {
for (const auto &converter : typeinfo->implicit_conversions) {
auto temp = reinterpret_steal<object>(converter(src.ptr(), typeinfo->type));
if (load_impl<ThisT>(temp, false)) {
loader_life_support::add_patient(temp);
return true;
}
}
if (this_.try_direct_conversions(src)) {
return true;
}
}
// Failed to match local typeinfo. Try again with global.
if (typeinfo->module_local) {
if (auto *gtype = get_global_type_info(*typeinfo->cpptype)) {
typeinfo = gtype;
return load(src, false);
}
}
// Global typeinfo has precedence over foreign module_local
if (try_load_foreign_module_local(src)) {
return true;
}
// None of the custom converters accepted None; now convert None to nullptr.
if (src.is_none()) {
// Defer accepting None to other overloads (if we aren't in convert mode):
if (!convert) {
return false;
}
value = nullptr;
return true;
}
if (convert && cpptype && this_.try_cpp_conduit(src)) {
return true;
}
return false;
}
// Called to do type lookup and wrap the pointer and type in a pair when a dynamic_cast
// isn't needed or can't be used. If the type is unknown, sets the error and returns a pair
// with .second = nullptr. (p.first = nullptr is not an error: it becomes None).
PYBIND11_NOINLINE static std::pair<const void *, const type_info *>
src_and_type(const void *src,
const std::type_info &cast_type,
const std::type_info *rtti_type = nullptr) {
if (auto *tpi = get_type_info(cast_type)) {
return {src, const_cast<const type_info *>(tpi)};
}
// Not found, set error:
std::string tname = rtti_type ? rtti_type->name() : cast_type.name();
detail::clean_type_id(tname);
std::string msg = "Unregistered type : " + tname;
set_error(PyExc_TypeError, msg.c_str());
return {nullptr, nullptr};
}
const type_info *typeinfo = nullptr;
const std::type_info *cpptype = nullptr;
void *value = nullptr;
};
inline object cpp_conduit_method(handle self,
const bytes &pybind11_platform_abi_id,
const capsule &cpp_type_info_capsule,
const bytes &pointer_kind) {
#ifdef PYBIND11_HAS_STRING_VIEW
using cpp_str = std::string_view;
#else
using cpp_str = std::string;
#endif
if (cpp_str(pybind11_platform_abi_id) != PYBIND11_PLATFORM_ABI_ID) {
return none();
}
if (std::strcmp(cpp_type_info_capsule.name(), typeid(std::type_info).name()) != 0) {
return none();
}
if (cpp_str(pointer_kind) != "raw_pointer_ephemeral") {
throw std::runtime_error("Invalid pointer_kind: \"" + std::string(pointer_kind) + "\"");
}
const auto *cpp_type_info = cpp_type_info_capsule.get_pointer<const std::type_info>();
type_caster_generic caster(*cpp_type_info);
if (!caster.load(self, false)) {
return none();
}
return capsule(caster.value, cpp_type_info->name());
}
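// Hedged sketch of the consumer side (not defined in this header): a foreign binding
// framework holding a pybind11-wrapped object `h` could attempt a zero-copy borrow of
// the underlying C++ object of type `T`, assuming pybind11 registers the method above
// under the attribute name `_pybind11_conduit_v1_`:
//
//     capsule ti(const_cast<std::type_info *>(&typeid(T)), typeid(std::type_info).name());
//     object cap = h.attr("_pybind11_conduit_v1_")(
//         bytes(PYBIND11_PLATFORM_ABI_ID), ti, bytes("raw_pointer_ephemeral"));
//     if (!cap.is_none()) {
//         T *ptr = static_cast<T *>(capsule(cap).get_pointer()); // ephemeral pointer!
//     }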
/**
* Determine suitable casting operator for pointer-or-lvalue-casting type casters. The type caster
* needs to provide `operator T*()` and `operator T&()` operators.
*
* If the type supports moving the value away via an `operator T&&() &&` method, it should use
* `movable_cast_op_type` instead.
*/
template <typename T>
using cast_op_type = conditional_t<std::is_pointer<remove_reference_t<T>>::value,
typename std::add_pointer<intrinsic_t<T>>::type,
typename std::add_lvalue_reference<intrinsic_t<T>>::type>;
/**
* Determine suitable casting operator for a type caster with a movable value. Such a type caster
* needs to provide `operator T*()`, `operator T&()`, and `operator T&&() &&`. The latter will be
* called in appropriate contexts where the value can be moved rather than copied.
*
 * These operators are automatically provided when using the PYBIND11_TYPE_CASTER macro.
*/
template <typename T>
using movable_cast_op_type
= conditional_t<std::is_pointer<typename std::remove_reference<T>::type>::value,
typename std::add_pointer<intrinsic_t<T>>::type,
conditional_t<std::is_rvalue_reference<T>::value,
typename std::add_rvalue_reference<intrinsic_t<T>>::type,
typename std::add_lvalue_reference<intrinsic_t<T>>::type>>;
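// Illustration of what these aliases resolve to, derived directly from the definitions:
//
//     cast_op_type<int *&>              -> int *
//     cast_op_type<const Foo &>         -> Foo &
//     movable_cast_op_type<Foo &&>      -> Foo &&
//     movable_cast_op_type<const Foo &> -> Foo &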
// Does the container have a mapped type and is it recursive?
// Implemented by specializations below.
template <typename Container, typename SFINAE = void>
struct container_mapped_type_traits {
static constexpr bool has_mapped_type = false;
static constexpr bool has_recursive_mapped_type = false;
};
template <typename Container>
struct container_mapped_type_traits<
Container,
typename std::enable_if<
std::is_same<typename Container::mapped_type, Container>::value>::type> {
static constexpr bool has_mapped_type = true;
static constexpr bool has_recursive_mapped_type = true;
};
template <typename Container>
struct container_mapped_type_traits<
Container,
typename std::enable_if<
negation<std::is_same<typename Container::mapped_type, Container>>::value>::type> {
static constexpr bool has_mapped_type = true;
static constexpr bool has_recursive_mapped_type = false;
};
// Does the container have a value type and is it recursive?
// Implemented by specializations below.
template <typename Container, typename SFINAE = void>
struct container_value_type_traits : std::false_type {
static constexpr bool has_value_type = false;
static constexpr bool has_recursive_value_type = false;
};
template <typename Container>
struct container_value_type_traits<
Container,
typename std::enable_if<
std::is_same<typename Container::value_type, Container>::value>::type> {
static constexpr bool has_value_type = true;
static constexpr bool has_recursive_value_type = true;
};
template <typename Container>
struct container_value_type_traits<
Container,
typename std::enable_if<
negation<std::is_same<typename Container::value_type, Container>>::value>::type> {
static constexpr bool has_value_type = true;
static constexpr bool has_recursive_value_type = false;
};
/*
* Tag to be used for representing the bottom of recursively defined types.
* Define this tag so we don't have to use void.
*/
struct recursive_bottom {};
/*
* Implementation detail of `recursive_container_traits` below.
* `T` is the `value_type` of the container, which might need to be modified to
* avoid recursive types and const types.
*/
template <typename T, bool is_this_a_map>
struct impl_type_to_check_recursively {
/*
* If the container is recursive, then no further recursion should be done.
*/
using if_recursive = recursive_bottom;
/*
* Otherwise yield `T` unchanged.
*/
using if_not_recursive = T;
};
/*
 * For pairs (which occur only as the value type of a map), the `const` should be
 * removed from the first type.
* Also, if the map is recursive, then the recursive checking should consider
* the first type only.
*/
template <typename A, typename B>
struct impl_type_to_check_recursively<std::pair<A, B>, /* is_this_a_map = */ true> {
using if_recursive = typename std::remove_const<A>::type;
using if_not_recursive = std::pair<typename std::remove_const<A>::type, B>;
};
/*
* Implementation of `recursive_container_traits` below.
*/
template <typename Container, typename SFINAE = void>
struct impl_recursive_container_traits {
using type_to_check_recursively = recursive_bottom;
};
template <typename Container>
struct impl_recursive_container_traits<
Container,
typename std::enable_if<container_value_type_traits<Container>::has_value_type>::type> {
static constexpr bool is_recursive
= container_mapped_type_traits<Container>::has_recursive_mapped_type
|| container_value_type_traits<Container>::has_recursive_value_type;
/*
* This member dictates which type Pybind11 should check recursively in traits
* such as `is_move_constructible`, `is_copy_constructible`, `is_move_assignable`, ...
* Direct access to `value_type` should be avoided:
* 1. `value_type` might recursively contain the type again
* 2. `value_type` of STL map types is `std::pair<A const, B>`, the `const`
* should be removed.
*
*/
using type_to_check_recursively = typename std::conditional<
is_recursive,
typename impl_type_to_check_recursively<
typename Container::value_type,
container_mapped_type_traits<Container>::has_mapped_type>::if_recursive,
typename impl_type_to_check_recursively<
typename Container::value_type,
container_mapped_type_traits<Container>::has_mapped_type>::if_not_recursive>::type;
};
/*
* This trait defines the `type_to_check_recursively` which is needed to properly
* handle recursively defined traits such as `is_move_constructible` without going
* into an infinite recursion.
* Should be used instead of directly accessing the `value_type`.
* It cancels the recursion by returning the `recursive_bottom` tag.
*
* The default definition of `type_to_check_recursively` is as follows:
*
* 1. By default, it is `recursive_bottom`, so that the recursion is canceled.
* 2. If the type is non-recursive and defines a `value_type`, then the `value_type` is used.
* If the `value_type` is a pair and a `mapped_type` is defined,
* then the `const` is removed from the first type.
* 3. If the type is recursive and `value_type` is not a pair, then `recursive_bottom` is returned.
* 4. If the type is recursive and `value_type` is a pair and a `mapped_type` is defined,
* then `const` is removed from the first type and the first type is returned.
*
* This behavior can be extended by the user as seen in test_stl_binders.cpp.
*
 * This struct is exactly the same as impl_recursive_container_traits.
 * The duplication ensures that user-defined specializations do not compete with the
 * internal specializations, but instead take precedence over them.
*/
template <typename Container, typename SFINAE = void>
struct recursive_container_traits : impl_recursive_container_traits<Container> {};
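// Hedged sketch of a user-side extension (compare test_stl_binders.cpp): for a custom
// container whose value_type indirectly contains the container itself, a full
// specialization can cut the recursion off explicitly. `MyRecursiveVector` is a
// hypothetical type:
//
//     template <>
//     struct recursive_container_traits<MyRecursiveVector> {
//         using type_to_check_recursively = recursive_bottom;
//     };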
template <typename T>
struct is_move_constructible
: all_of<std::is_move_constructible<T>,
is_move_constructible<
typename recursive_container_traits<T>::type_to_check_recursively>> {};
template <>
struct is_move_constructible<recursive_bottom> : std::true_type {};
// Likewise for std::pair
// (after C++17 it is mandatory that the move constructor not exist when the two types aren't
// themselves move constructible, but this cannot be relied upon when T1 or T2 are themselves
// containers).
template <typename T1, typename T2>
struct is_move_constructible<std::pair<T1, T2>>
: all_of<is_move_constructible<T1>, is_move_constructible<T2>> {};
// std::is_copy_constructible isn't quite enough: it lets std::vector<T> (and similar) through when
// T is non-copyable, but code containing such a copy constructor fails to actually compile.
template <typename T>
struct is_copy_constructible
: all_of<std::is_copy_constructible<T>,
is_copy_constructible<
typename recursive_container_traits<T>::type_to_check_recursively>> {};
template <>
struct is_copy_constructible<recursive_bottom> : std::true_type {};
// Likewise for std::pair
// (after C++17 it is mandatory that the copy constructor not exist when the two types aren't
// themselves copy constructible, but this cannot be relied upon when T1 or T2 are themselves
// containers).
template <typename T1, typename T2>
struct is_copy_constructible<std::pair<T1, T2>>
: all_of<is_copy_constructible<T1>, is_copy_constructible<T2>> {};
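// Illustration: std::is_copy_constructible<std::vector<std::unique_ptr<int>>> reports
// true (the copy constructor is declared), while the recursive trait above correctly
// yields false, because the element type is checked via type_to_check_recursively.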
// The same problems arise with std::is_copy_assignable, so we use the same workaround.
template <typename T>
struct is_copy_assignable
: all_of<
std::is_copy_assignable<T>,
is_copy_assignable<typename recursive_container_traits<T>::type_to_check_recursively>> {
};
template <>
struct is_copy_assignable<recursive_bottom> : std::true_type {};
template <typename T1, typename T2>
struct is_copy_assignable<std::pair<T1, T2>>
: all_of<is_copy_assignable<T1>, is_copy_assignable<T2>> {};
PYBIND11_NAMESPACE_END(detail)
// polymorphic_type_hook<itype>::get(src, tinfo) determines whether the object pointed
// to by `src` actually is an instance of some class derived from `itype`.
// If so, it sets `tinfo` to point to the std::type_info representing that derived
// type, and returns a pointer to the start of the most-derived object of that type
// (in which `src` is a subobject; this will be the same address as `src` in most
// single inheritance cases). If not, or if `src` is nullptr, it simply returns `src`
// and leaves `tinfo` at its default value of nullptr.
//
// The default polymorphic_type_hook just returns src. A specialization for polymorphic
// types determines the runtime type of the passed object and adjusts the this-pointer
// appropriately via dynamic_cast<void*>. This is what enables a C++ Animal* to appear
// to Python as a Dog (if Dog inherits from Animal, Animal is polymorphic, Dog is
// registered with pybind11, and this Animal is in fact a Dog).
//
// You may specialize polymorphic_type_hook yourself for types that want to appear
// polymorphic to Python but do not use C++ RTTI. (This is a not uncommon pattern
// in performance-sensitive applications, used most notably in LLVM.)
//
// polymorphic_type_hook_base allows users to specialize polymorphic_type_hook with
// std::enable_if. User provided specializations will always have higher priority than
// the default implementation and specialization provided in polymorphic_type_hook_base.
template <typename itype, typename SFINAE = void>
struct polymorphic_type_hook_base {
static const void *get(const itype *src, const std::type_info *&) { return src; }
};
template <typename itype>
struct polymorphic_type_hook_base<itype, detail::enable_if_t<std::is_polymorphic<itype>::value>> {
static const void *get(const itype *src, const std::type_info *&type) {
type = src ? &typeid(*src) : nullptr;
return dynamic_cast<const void *>(src);
}
};
template <typename itype, typename SFINAE = void>
struct polymorphic_type_hook : public polymorphic_type_hook_base<itype> {};
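// Hedged example of such a specialization, for a hierarchy that carries its own kind
// tag instead of a vtable (all names below are hypothetical):
//
//     template <>
//     struct pybind11::polymorphic_type_hook<msg::Base> {
//         static const void *get(const msg::Base *src, const std::type_info *&type) {
//             if (src != nullptr && src->kind == msg::Kind::Derived) {
//                 type = &typeid(msg::Derived);
//                 return static_cast<const msg::Derived *>(src);
//             }
//             return src; // unknown kind: `type` stays nullptr, no pointer adjustment
//         }
//     };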
PYBIND11_NAMESPACE_BEGIN(detail)
/// Generic type caster for objects stored on the heap
template <typename type>
class type_caster_base : public type_caster_generic {
using itype = intrinsic_t<type>;
public:
static constexpr auto name = const_name<type>();
type_caster_base() : type_caster_base(typeid(type)) {}
explicit type_caster_base(const std::type_info &info) : type_caster_generic(info) {}
static handle cast(const itype &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(std::addressof(src), policy, parent);
}
static handle cast(itype &&src, return_value_policy, handle parent) {
return cast(std::addressof(src), return_value_policy::move, parent);
}
// Returns a (pointer, type_info) pair taking care of necessary type lookup for a
// polymorphic type (using RTTI by default, but can be overridden by specializing
// polymorphic_type_hook). If the instance isn't derived, returns the base version.
static std::pair<const void *, const type_info *> src_and_type(const itype *src) {
const auto &cast_type = typeid(itype);
const std::type_info *instance_type = nullptr;
const void *vsrc = polymorphic_type_hook<itype>::get(src, instance_type);
if (instance_type && !same_type(cast_type, *instance_type)) {
// This is a base pointer to a derived type. If the derived type is registered
// with pybind11, we want to make the full derived object available.
// In the typical case where itype is polymorphic, we get the correct
// derived pointer (which may be != base pointer) by a dynamic_cast to
// most derived type. If itype is not polymorphic, we won't get here
// except via a user-provided specialization of polymorphic_type_hook,
// and the user has promised that no this-pointer adjustment is
// required in that case, so it's OK to use static_cast.
if (const auto *tpi = get_type_info(*instance_type)) {
return {vsrc, tpi};
}
}
// Otherwise we have either a nullptr, an `itype` pointer, or an unknown derived pointer,
// so don't do a cast
return type_caster_generic::src_and_type(src, cast_type, instance_type);
}
static handle cast(const itype *src, return_value_policy policy, handle parent) {
auto st = src_and_type(src);
return type_caster_generic::cast(st.first,
policy,
parent,
st.second,
make_copy_constructor(src),
make_move_constructor(src));
}
static handle cast_holder(const itype *src, const void *holder) {
auto st = src_and_type(src);
return type_caster_generic::cast(st.first,
return_value_policy::take_ownership,
{},
st.second,
nullptr,
nullptr,
holder);
}
template <typename T>
using cast_op_type = detail::cast_op_type<T>;
// NOLINTNEXTLINE(google-explicit-constructor)
operator itype *() { return (type *) value; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator itype &() {
if (!value) {
throw reference_cast_error();
}
return *((itype *) value);
}
protected:
using Constructor = void *(*) (const void *);
/* Only enabled when the types are {copy,move}-constructible *and* when the type
does not have a private operator new implementation. A comma operator is used in the
decltype argument to apply SFINAE to the public copy/move constructors.*/
template <typename T, typename = enable_if_t<is_copy_constructible<T>::value>>
static auto make_copy_constructor(const T *) -> decltype(new T(std::declval<const T>()),
Constructor{}) {
return [](const void *arg) -> void * { return new T(*reinterpret_cast<const T *>(arg)); };
}
template <typename T, typename = enable_if_t<is_move_constructible<T>::value>>
static auto make_move_constructor(const T *) -> decltype(new T(std::declval<T &&>()),
Constructor{}) {
return [](const void *arg) -> void * {
return new T(std::move(*const_cast<T *>(reinterpret_cast<const T *>(arg))));
};
}
static Constructor make_copy_constructor(...) { return nullptr; }
static Constructor make_move_constructor(...) { return nullptr; }
};
inline std::string quote_cpp_type_name(const std::string &cpp_type_name) {
return cpp_type_name; // No-op for now. See PR #4888
}
PYBIND11_NOINLINE std::string type_info_description(const std::type_info &ti) {
if (auto *type_data = get_type_info(ti)) {
handle th((PyObject *) type_data->type);
return th.attr("__module__").cast<std::string>() + '.'
+ th.attr("__qualname__").cast<std::string>();
}
return quote_cpp_type_name(clean_type_id(ti.name()));
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
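A minimal usage sketch of the machinery above, from the user's side (module and type names are illustrative): `py::cast` and `object::cast` funnel into the `type_caster_base` defined in this header.

```cpp
#include <pybind11/pybind11.h>

#include <string>

namespace py = pybind11;

struct Pet {
    std::string name;
};

PYBIND11_MODULE(demo, m) {
    py::class_<Pet>(m, "Pet").def(py::init<std::string>()).def_readwrite("name", &Pet::name);
    m.def("roundtrip", [](py::object obj) {
        // Python -> C++: load_impl Case 1 (exact type match), no conversion needed.
        auto &p = obj.cast<Pet &>();
        // C++ -> Python: the automatic policy degrades to copy for an lvalue reference.
        return py::cast(p);
    });
}
```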
|
==============================================================================================================================
SOURCE CODE FILE: typeid.h
LINES: 1
SIZE: 1.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\typeid.h
ENCODING: utf-8
```h
/*
pybind11/detail/typeid.h: Compiler-independent access to type identifiers
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <cstdio>
#include <cstdlib>
#if defined(__GNUG__)
# include <cxxabi.h>
#endif
#include "common.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// Erase all occurrences of a substring
inline void erase_all(std::string &string, const std::string &search) {
for (size_t pos = 0;;) {
pos = string.find(search, pos);
if (pos == std::string::npos) {
break;
}
string.erase(pos, search.length());
}
}
PYBIND11_NOINLINE void clean_type_id(std::string &name) {
#if defined(__GNUG__)
int status = 0;
std::unique_ptr<char, void (*)(void *)> res{
abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), std::free};
if (status == 0) {
name = res.get();
}
#else
detail::erase_all(name, "class ");
detail::erase_all(name, "struct ");
detail::erase_all(name, "enum ");
#endif
detail::erase_all(name, "pybind11::");
}
inline std::string clean_type_id(const char *typeid_name) {
std::string name(typeid_name);
detail::clean_type_id(name);
return name;
}
PYBIND11_NAMESPACE_END(detail)
/// Return a string representation of a C++ type
template <typename T>
static std::string type_id() {
return detail::clean_type_id(typeid(T).name());
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
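`type_id<T>()` is the public entry point of this header. A minimal sketch (it assumes the Python development headers are on the include path, since `common.h` pulls them in; the exact output string is compiler-dependent):

```cpp
#include <pybind11/detail/typeid.h>

#include <iostream>

namespace demo {
struct Widget {};
} // namespace demo

int main() {
    // Prints "demo::Widget" after demangling (GCC/Clang) or prefix stripping (MSVC).
    std::cout << pybind11::type_id<demo::Widget>() << '\n';
}
```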
|
========================================================================================================================================
SOURCE CODE FILE: value_and_holder.h
LINES: 1
SIZE: 2.82 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\detail\value_and_holder.h
ENCODING: utf-8
```h
// Copyright (c) 2016-2024 The Pybind Development Team.
// All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#pragma once
#include "common.h"
#include <cstddef>
#include <typeinfo>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
struct value_and_holder {
instance *inst = nullptr;
size_t index = 0u;
const detail::type_info *type = nullptr;
void **vh = nullptr;
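    // Pointer into the instance's value/holder storage: vh[0] is the raw value pointer and
    // vh[1] is the start of the type-erased holder, as read by value_ptr() and holder() below.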
// Main constructor for a found value/holder:
value_and_holder(instance *i, const detail::type_info *type, size_t vpos, size_t index)
: inst{i}, index{index}, type{type},
vh{inst->simple_layout ? inst->simple_value_holder
: &inst->nonsimple.values_and_holders[vpos]} {}
// Default constructor (used to signal a value-and-holder not found by get_value_and_holder())
value_and_holder() = default;
// Used for past-the-end iterator
explicit value_and_holder(size_t index) : index{index} {}
template <typename V = void>
V *&value_ptr() const {
return reinterpret_cast<V *&>(vh[0]);
}
// True if this `value_and_holder` has a non-null value pointer
explicit operator bool() const { return value_ptr() != nullptr; }
template <typename H>
H &holder() const {
return reinterpret_cast<H &>(vh[1]);
}
bool holder_constructed() const {
return inst->simple_layout
? inst->simple_holder_constructed
: (inst->nonsimple.status[index] & instance::status_holder_constructed) != 0u;
}
// NOLINTNEXTLINE(readability-make-member-function-const)
void set_holder_constructed(bool v = true) {
if (inst->simple_layout) {
inst->simple_holder_constructed = v;
} else if (v) {
inst->nonsimple.status[index] |= instance::status_holder_constructed;
} else {
inst->nonsimple.status[index] &= (std::uint8_t) ~instance::status_holder_constructed;
}
}
bool instance_registered() const {
return inst->simple_layout
? inst->simple_instance_registered
: ((inst->nonsimple.status[index] & instance::status_instance_registered) != 0);
}
// NOLINTNEXTLINE(readability-make-member-function-const)
void set_instance_registered(bool v = true) {
if (inst->simple_layout) {
inst->simple_instance_registered = v;
} else if (v) {
inst->nonsimple.status[index] |= instance::status_instance_registered;
} else {
inst->nonsimple.status[index] &= (std::uint8_t) ~instance::status_instance_registered;
}
}
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
======================================================================================================================
SOURCE CODE FILE: eigen.h
LINES: 1
SIZE: 0.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\eigen.h
ENCODING: utf-8
```h
/*
pybind11/eigen.h: Transparent conversion for dense and sparse Eigen matrices
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "eigen/matrix.h"
```
|
=============================================================================================================================
SOURCE CODE FILE: common.h
LINES: 1
SIZE: 0.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\eigen\common.h
ENCODING: utf-8
```h
// Copyright (c) 2023 The pybind Community.
#pragma once
// Common message for `static_assert()`s, which are useful to easily
// preempt much less obvious errors.
#define PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED \
"Pointer types (in particular `PyObject *`) are not supported as scalar types for Eigen " \
"types."
```
|
=============================================================================================================================
SOURCE CODE FILE: matrix.h
LINES: 1
SIZE: 32.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\eigen\matrix.h
ENCODING: utf-8
```h
/*
pybind11/eigen/matrix.h: Transparent conversion for dense and sparse Eigen matrices
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <pybind11/numpy.h>
#include "common.h"
/* HINT: To suppress warnings originating from the Eigen headers, use -isystem.
See also:
https://stackoverflow.com/questions/2579576/i-dir-vs-isystem-dir
https://stackoverflow.com/questions/1741816/isystem-for-ms-visual-studio-c-compiler
*/
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(5054) // https://github.com/pybind/pybind11/pull/3741
// C5054: operator '&': deprecated between enumerations of different types
#if defined(__MINGW32__)
PYBIND11_WARNING_DISABLE_GCC("-Wmaybe-uninitialized")
#endif
#include <Eigen/Core>
#include <Eigen/SparseCore>
PYBIND11_WARNING_POP
// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
// move constructors that break things. We could detect this and explicitly copy, but an extra copy
// of matrices seems highly undesirable.
static_assert(EIGEN_VERSION_AT_LEAST(3, 2, 7),
"Eigen matrix support in pybind11 requires Eigen >= 3.2.7");
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename MatrixType>
using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
template <typename MatrixType>
using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;
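// For example, a bound function taking `EigenDRef<Eigen::MatrixXd>` can accept both C- and
// F-ordered float64 arrays without copying, whereas a plain `Eigen::Ref<Eigen::MatrixXd>`
// requires the default contiguous column-major strides.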
PYBIND11_NAMESPACE_BEGIN(detail)
#if EIGEN_VERSION_AT_LEAST(3, 3, 0)
using EigenIndex = Eigen::Index;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::Map<Eigen::SparseMatrix<Scalar, Flags, StorageIndex>>;
#else
using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::MappedSparseMatrix<Scalar, Flags, StorageIndex>;
#endif
// Matches Eigen::Map, Eigen::Ref, blocks, etc:
template <typename T>
using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>,
std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;
template <typename T>
using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;
template <typename T>
using is_eigen_dense_plain
= all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;
template <typename T>
using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;
// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above. This
// basically covers anything that can be assigned to a dense matrix but doesn't have a typical
// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and
// SelfAdjointView fall into this category.
template <typename T>
using is_eigen_other
= all_of<is_template_base_of<Eigen::EigenBase, T>,
negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>>;
// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
template <bool EigenRowMajor>
struct EigenConformable {
bool conformable = false;
EigenIndex rows = 0, cols = 0;
EigenDStride stride{0, 0}; // Only valid if negativestrides is false!
bool negativestrides = false; // If true, do not use stride!
// NOLINTNEXTLINE(google-explicit-constructor)
EigenConformable(bool fits = false) : conformable{fits} {}
// Matrix type:
EigenConformable(EigenIndex r, EigenIndex c, EigenIndex rstride, EigenIndex cstride)
: conformable{true}, rows{r}, cols{c},
// TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity.
// http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
stride{EigenRowMajor ? (rstride > 0 ? rstride : 0)
: (cstride > 0 ? cstride : 0) /* outer stride */,
EigenRowMajor ? (cstride > 0 ? cstride : 0)
: (rstride > 0 ? rstride : 0) /* inner stride */},
negativestrides{rstride < 0 || cstride < 0} {}
// Vector type:
EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
: EigenConformable(r, c, r == 1 ? c * stride : stride, c == 1 ? r : r * stride) {}
template <typename props>
bool stride_compatible() const {
// To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
// matching strides, or a dimension size of 1 (in which case the stride value is
// irrelevant). Alternatively, if any dimension size is 0, the strides are not relevant
// (and numpy ≥ 1.23 sets the strides to 0 in that case, so we need to check explicitly).
if (negativestrides) {
return false;
}
if (rows == 0 || cols == 0) {
return true;
}
return (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner()
|| (EigenRowMajor ? cols : rows) == 1)
&& (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer()
|| (EigenRowMajor ? rows : cols) == 1);
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator bool() const { return conformable; }
};
template <typename Type>
struct eigen_extract_stride {
using type = Type;
};
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> {
using type = StrideType;
};
template <typename PlainObjectType, int Options, typename StrideType>
struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> {
using type = StrideType;
};
// Helper struct for extracting information from an Eigen type
template <typename Type_>
struct EigenProps {
using Type = Type_;
using Scalar = typename Type::Scalar;
using StrideType = typename eigen_extract_stride<Type>::type;
static constexpr EigenIndex rows = Type::RowsAtCompileTime, cols = Type::ColsAtCompileTime,
size = Type::SizeAtCompileTime;
static constexpr bool row_major = Type::IsRowMajor,
vector
= Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
fixed_rows = rows != Eigen::Dynamic, fixed_cols = cols != Eigen::Dynamic,
fixed = size != Eigen::Dynamic, // Fully-fixed size
dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size
template <EigenIndex i, EigenIndex ifzero>
using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
static constexpr EigenIndex inner_stride
= if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
outer_stride = if_zero<StrideType::OuterStrideAtCompileTime,
                       vector ? size : row_major ? cols : rows>::value;
static constexpr bool dynamic_stride
= inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
static constexpr bool requires_row_major
= !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
static constexpr bool requires_col_major
= !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;
// Takes an input array and determines whether we can make it fit into the Eigen type. If
// the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
// (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
static EigenConformable<row_major> conformable(const array &a) {
const auto dims = a.ndim();
if (dims < 1 || dims > 2) {
return false;
}
if (dims == 2) { // Matrix type: require exact match (or dynamic)
EigenIndex np_rows = a.shape(0), np_cols = a.shape(1),
np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols)) {
return false;
}
return {np_rows, np_cols, np_rstride, np_cstride};
}
// Otherwise we're storing an n-vector. Only one of the strides will be used, but
// whichever is used, we want the (single) numpy stride value.
const EigenIndex n = a.shape(0),
stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));
if (vector) { // Eigen type is a compile-time vector
if (fixed && size != n) {
return false; // Vector size mismatch
}
return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
}
if (fixed) {
// The type has a fixed size, but is not a vector: abort
return false;
}
if (fixed_cols) {
// Since this isn't a vector, cols must be != 1. We allow this only if it exactly
// equals the number of elements (rows is Dynamic, and so 1 row is allowed).
if (cols != n) {
return false;
}
return {1, n, stride};
} // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
if (fixed_rows && rows != n) {
return false;
}
return {n, 1, stride};
}
static constexpr bool show_writeable
= is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
static constexpr bool show_order = is_eigen_dense_map<Type>::value;
static constexpr bool show_c_contiguous = show_order && requires_row_major;
static constexpr bool show_f_contiguous
= !show_c_contiguous && show_order && requires_col_major;
static constexpr auto descriptor
= const_name("numpy.ndarray[") + npy_format_descriptor<Scalar>::name + const_name("[")
+ const_name<fixed_rows>(const_name<(size_t) rows>(), const_name("m")) + const_name(", ")
+ const_name<fixed_cols>(const_name<(size_t) cols>(), const_name("n")) + const_name("]")
+
// For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to
// be satisfied: writeable=True (for a mutable reference), and, depending on the map's
// stride options, possibly f_contiguous or c_contiguous. We include them in the
// descriptor output to provide some hint as to why a TypeError is occurring (otherwise
// it can be confusing to see that a function accepts a 'numpy.ndarray[float64[3,2]]' and
// an error message that you *gave* a numpy.ndarray of the right type and dimensions).
const_name<show_writeable>(", flags.writeable", "")
+ const_name<show_c_contiguous>(", flags.c_contiguous", "")
+ const_name<show_f_contiguous>(", flags.f_contiguous", "") + const_name("]");
};
// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data,
// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array.
template <typename props>
handle
eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
constexpr ssize_t elem_size = sizeof(typename props::Scalar);
array a;
if (props::vector) {
a = array({src.size()}, {elem_size * src.innerStride()}, src.data(), base);
} else {
a = array({src.rows(), src.cols()},
{elem_size * src.rowStride(), elem_size * src.colStride()},
src.data(),
base);
}
if (!writeable) {
array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return a.release();
}
// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
// references the Eigen object's data, with `base` as the python-registered base class (if omitted,
// the base will be set to None, and lifetime management is up to the caller). The numpy array is
// non-writeable if the given type is const.
template <typename props, typename Type>
handle eigen_ref_array(Type &src, handle parent = none()) {
// none here is to get past array's should-we-copy detection, which currently always
// copies when there is no base. Setting the base to None should be harmless.
return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
}
// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a
// numpy array that references the encapsulated data with a python-side reference to the capsule to
// tie its destruction to that of any dependent python objects. Const-ness is determined by
// whether or not the Type of the pointer given is const.
template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
handle eigen_encapsulate(Type *src) {
capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
return eigen_ref_array<props>(*src, base);
}
// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
// types.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
using Scalar = typename Type::Scalar;
static_assert(!std::is_pointer<Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
using props = EigenProps<Type>;
bool load(handle src, bool convert) {
// If we're in no-convert mode, only load if given an array of the correct type
if (!convert && !isinstance<array_t<Scalar>>(src)) {
return false;
}
// Coerce into an array, but don't do type conversion yet; the copy below handles it.
auto buf = array::ensure(src);
if (!buf) {
return false;
}
auto dims = buf.ndim();
if (dims < 1 || dims > 2) {
return false;
}
auto fits = props::conformable(buf);
if (!fits) {
return false;
}
// Allocate the new type, then build a numpy reference into it
value = Type(fits.rows, fits.cols);
auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
if (dims == 1) {
ref = ref.squeeze();
} else if (ref.ndim() == 1) {
buf = buf.squeeze();
}
int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
if (result < 0) { // Copy failed!
PyErr_Clear();
return false;
}
return true;
}
private:
// Cast implementation
template <typename CType>
static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
switch (policy) {
case return_value_policy::take_ownership:
case return_value_policy::automatic:
return eigen_encapsulate<props>(src);
case return_value_policy::move:
return eigen_encapsulate<props>(new CType(std::move(*src)));
case return_value_policy::copy:
return eigen_array_cast<props>(*src);
case return_value_policy::reference:
case return_value_policy::automatic_reference:
return eigen_ref_array<props>(*src);
case return_value_policy::reference_internal:
return eigen_ref_array<props>(*src, parent);
default:
throw cast_error("unhandled return_value_policy: should not happen!");
};
}
public:
// Normal returned non-reference, non-const value:
static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
return cast_impl(&src, return_value_policy::move, parent);
}
// If you return a non-reference const, we mark the numpy array readonly:
static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
return cast_impl(&src, return_value_policy::move, parent);
}
// lvalue reference return; default (automatic) becomes copy
static handle cast(Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
// const lvalue reference return; default (automatic) becomes copy
static handle cast(const Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
// non-const pointer return
static handle cast(Type *src, return_value_policy policy, handle parent) {
return cast_impl(src, policy, parent);
}
// const pointer return
static handle cast(const Type *src, return_value_policy policy, handle parent) {
return cast_impl(src, policy, parent);
}
static constexpr auto name = props::descriptor;
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type *() { return &value; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &() { return value; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &&() && { return std::move(value); }
template <typename T>
using cast_op_type = movable_cast_op_type<T>;
private:
Type value;
};
// Base class for casting reference/map/block/etc. objects back to python.
template <typename MapType>
struct eigen_map_caster {
static_assert(!std::is_pointer<typename MapType::Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
private:
using props = EigenProps<MapType>;
public:
// Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
// to stay around), but we'll allow it under the assumption that you know what you're doing
// (and have an appropriate keep_alive in place). We return a numpy array pointing directly at
// the ref's data (The numpy array ends up read-only if the ref was to a const matrix type.)
// Note that this means you need to ensure you don't destroy the object in some other way (e.g.
// with an appropriate keep_alive, or with a reference to a statically allocated matrix).
static handle cast(const MapType &src, return_value_policy policy, handle parent) {
switch (policy) {
case return_value_policy::copy:
return eigen_array_cast<props>(src);
case return_value_policy::reference_internal:
return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
case return_value_policy::reference:
case return_value_policy::automatic:
case return_value_policy::automatic_reference:
return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
default:
// move, take_ownership don't make any sense for a ref/map:
pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
}
}
static constexpr auto name = props::descriptor;
// Explicitly deleted: we do not support python -> C++ conversion for these (i.e. they can be
// return types but not bound arguments). We still declare them (explicitly deleted) so that
// you end up here if you try anyway.
bool load(handle, bool) = delete;
operator MapType() = delete;
template <typename>
using cast_op_type = MapType;
};
// We can return any map-like object (but can only load Refs, specialized next):
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>> : eigen_map_caster<Type> {};
// Loader for Ref<...> arguments. See the documentation for info on how to make this work without
// copying (it requires some extra effort in many cases).
template <typename PlainObjectType, typename StrideType>
struct type_caster<
Eigen::Ref<PlainObjectType, 0, StrideType>,
enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>>
: public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
private:
using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
using props = EigenProps<Type>;
using Scalar = typename props::Scalar;
static_assert(!std::is_pointer<Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
using Array
= array_t<Scalar,
array::forcecast
| ((props::row_major ? props::inner_stride : props::outer_stride) == 1
? array::c_style
: (props::row_major ? props::outer_stride : props::inner_stride) == 1
? array::f_style
: 0)>;
static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
// Delay construction (these have no default constructor)
std::unique_ptr<MapType> map;
std::unique_ptr<Type> ref;
// Our array. When possible, this is just a numpy array pointing to the source data, but
// sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an
// incompatible layout, or is an array of a type that needs to be converted). Using a numpy
// temporary (rather than an Eigen temporary) saves an extra copy when we need both type
// conversion and storage order conversion. (Note that we refuse to use this temporary copy
// when loading an argument for a Ref<M> with M non-const, i.e. a read-write reference).
Array copy_or_ref;
public:
bool load(handle src, bool convert) {
// First check whether what we have is already an array of the right type. If not, we
// can't avoid a copy (because the copy is also going to do type conversion).
bool need_copy = !isinstance<Array>(src);
EigenConformable<props::row_major> fits;
if (!need_copy) {
// We don't need a converting copy, but we also need to check whether the strides are
// compatible with the Ref's stride requirements
auto aref = reinterpret_borrow<Array>(src);
if (aref && (!need_writeable || aref.writeable())) {
fits = props::conformable(aref);
if (!fits) {
return false; // Incompatible dimensions
}
if (!fits.template stride_compatible<props>()) {
need_copy = true;
} else {
copy_or_ref = std::move(aref);
}
} else {
need_copy = true;
}
}
if (need_copy) {
// We need to copy: If we need a mutable reference, or we're not supposed to convert
// (either because we're in the no-convert overload pass, or because we're explicitly
// instructed not to copy (via `py::arg().noconvert()`)), we have to fail loading.
if (!convert || need_writeable) {
return false;
}
Array copy = Array::ensure(src);
if (!copy) {
return false;
}
fits = props::conformable(copy);
if (!fits || !fits.template stride_compatible<props>()) {
return false;
}
copy_or_ref = std::move(copy);
loader_life_support::add_patient(copy_or_ref);
}
ref.reset();
map.reset(new MapType(data(copy_or_ref),
fits.rows,
fits.cols,
make_stride(fits.stride.outer(), fits.stride.inner())));
ref.reset(new Type(*map));
return true;
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type *() { return ref.get(); }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &() { return *ref; }
template <typename _T>
using cast_op_type = pybind11::detail::cast_op_type<_T>;
private:
template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
Scalar *data(Array &a) {
return a.mutable_data();
}
template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
const Scalar *data(Array &a) {
return a.data();
}
// Attempt to figure out a constructor of `Stride` that will work.
// If both strides are fixed, use a default constructor:
template <typename S>
using stride_ctor_default = bool_constant<S::InnerStrideAtCompileTime != Eigen::Dynamic
&& S::OuterStrideAtCompileTime != Eigen::Dynamic
&& std::is_default_constructible<S>::value>;
// Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
// Eigen::Stride, and use it:
template <typename S>
using stride_ctor_dual
= bool_constant<!stride_ctor_default<S>::value
&& std::is_constructible<S, EigenIndex, EigenIndex>::value>;
// Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
// it (passing whichever stride is dynamic).
template <typename S>
using stride_ctor_outer
= bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
&& S::OuterStrideAtCompileTime == Eigen::Dynamic
&& S::InnerStrideAtCompileTime != Eigen::Dynamic
&& std::is_constructible<S, EigenIndex>::value>;
template <typename S>
using stride_ctor_inner
= bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
&& S::InnerStrideAtCompileTime == Eigen::Dynamic
&& S::OuterStrideAtCompileTime != Eigen::Dynamic
&& std::is_constructible<S, EigenIndex>::value>;
template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
static S make_stride(EigenIndex, EigenIndex) {
return S();
}
template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
static S make_stride(EigenIndex outer, EigenIndex inner) {
return S(outer, inner);
}
template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
static S make_stride(EigenIndex outer, EigenIndex) {
return S(outer);
}
template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
static S make_stride(EigenIndex, EigenIndex inner) {
return S(inner);
}
};
// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
// load() is not supported, but we can cast them into the python domain by first copying to a
// regular Eigen::Matrix, then casting that.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
static_assert(!std::is_pointer<typename Type::Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
protected:
using Matrix
= Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
using props = EigenProps<Matrix>;
public:
static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
handle h = eigen_encapsulate<props>(new Matrix(src));
return h;
}
static handle cast(const Type *src, return_value_policy policy, handle parent) {
return cast(*src, policy, parent);
}
static constexpr auto name = props::descriptor;
// Explicitly deleted: we do not support python -> C++ conversion for these (i.e. they can be
// return types but not bound arguments). We still declare them (explicitly deleted) so that
// you end up here if you try anyway.
bool load(handle, bool) = delete;
operator Type() = delete;
template <typename>
using cast_op_type = Type;
};
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
using Scalar = typename Type::Scalar;
static_assert(!std::is_pointer<Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
using StorageIndex = remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())>;
using Index = typename Type::Index;
static constexpr bool rowMajor = Type::IsRowMajor;
bool load(handle src, bool) {
if (!src) {
return false;
}
auto obj = reinterpret_borrow<object>(src);
object sparse_module = module_::import("scipy.sparse");
object matrix_type = sparse_module.attr(rowMajor ? "csr_matrix" : "csc_matrix");
if (!type::handle_of(obj).is(matrix_type)) {
try {
obj = matrix_type(obj);
} catch (const error_already_set &) {
return false;
}
}
auto values = array_t<Scalar>((object) obj.attr("data"));
auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
auto nnz = obj.attr("nnz").cast<Index>();
if (!values || !innerIndices || !outerIndices) {
return false;
}
value = EigenMapSparseMatrix<Scalar,
Type::Flags &(Eigen::RowMajor | Eigen::ColMajor),
StorageIndex>(shape[0].cast<Index>(),
shape[1].cast<Index>(),
std::move(nnz),
outerIndices.mutable_data(),
innerIndices.mutable_data(),
values.mutable_data());
return true;
}
static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
const_cast<Type &>(src).makeCompressed();
object matrix_type
= module_::import("scipy.sparse").attr(rowMajor ? "csr_matrix" : "csc_matrix");
array data(src.nonZeros(), src.valuePtr());
array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
array innerIndices(src.nonZeros(), src.innerIndexPtr());
return matrix_type(pybind11::make_tuple(
std::move(data), std::move(innerIndices), std::move(outerIndices)),
pybind11::make_tuple(src.rows(), src.cols()))
.release();
}
PYBIND11_TYPE_CASTER(Type,
const_name<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[",
"scipy.sparse.csc_matrix[")
+ npy_format_descriptor<Scalar>::name + const_name("]"));
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
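A short usage sketch for the dense casters above (module and function names are illustrative): a `Ref` parameter with fully dynamic strides binds compatible NumPy buffers in place, while a by-value `Eigen::MatrixXd` parameter always copies into a fresh matrix.

```cpp
#include <pybind11/eigen.h>

namespace py = pybind11;

PYBIND11_MODULE(eigen_demo, m) {
    // Writes through to the NumPy buffer; per the Ref caster's load() above, the array
    // must be writeable float64 with compatible strides, or overload resolution fails.
    m.def("scale_inplace", [](py::EigenDRef<Eigen::MatrixXd> a, double f) { a *= f; });

    // By-value parameter: the plain-matrix caster allocates and copies on the way in.
    m.def("transpose_copy",
          [](const Eigen::MatrixXd &a) -> Eigen::MatrixXd { return a.transpose(); });
}
```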
|
=============================================================================================================================
SOURCE CODE FILE: tensor.h
LINES: 1
SIZE: 18.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\eigen\tensor.h
ENCODING: utf-8
```h
/*
pybind11/eigen/tensor.h: Transparent conversion for Eigen tensors
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <pybind11/numpy.h>
#include "common.h"
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
static_assert(__GNUC__ > 5, "Eigen Tensor support in pybind11 requires GCC > 5.0");
#endif
// Disable warnings for Eigen
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4554)
PYBIND11_WARNING_DISABLE_MSVC(4127)
#if defined(__MINGW32__)
PYBIND11_WARNING_DISABLE_GCC("-Wmaybe-uninitialized")
#endif
#include <unsupported/Eigen/CXX11/Tensor>
PYBIND11_WARNING_POP
static_assert(EIGEN_VERSION_AT_LEAST(3, 3, 0),
"Eigen Tensor support in pybind11 requires Eigen >= 3.3.0");
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
PYBIND11_NAMESPACE_BEGIN(detail)
inline bool is_tensor_aligned(const void *data) {
return (reinterpret_cast<std::size_t>(data) % EIGEN_DEFAULT_ALIGN_BYTES) == 0;
}
template <typename T>
constexpr int compute_array_flag_from_tensor() {
static_assert((static_cast<int>(T::Layout) == static_cast<int>(Eigen::RowMajor))
|| (static_cast<int>(T::Layout) == static_cast<int>(Eigen::ColMajor)),
"Layout must be row or column major");
return (static_cast<int>(T::Layout) == static_cast<int>(Eigen::RowMajor)) ? array::c_style
: array::f_style;
}
template <typename T>
struct eigen_tensor_helper {};
template <typename Scalar_, int NumIndices_, int Options_, typename IndexType>
struct eigen_tensor_helper<Eigen::Tensor<Scalar_, NumIndices_, Options_, IndexType>> {
using Type = Eigen::Tensor<Scalar_, NumIndices_, Options_, IndexType>;
using ValidType = void;
static Eigen::DSizes<typename Type::Index, Type::NumIndices> get_shape(const Type &f) {
return f.dimensions();
}
static constexpr bool
is_correct_shape(const Eigen::DSizes<typename Type::Index, Type::NumIndices> & /*shape*/) {
return true;
}
template <typename T>
struct helper {};
template <size_t... Is>
struct helper<index_sequence<Is...>> {
static constexpr auto value = ::pybind11::detail::concat(const_name(((void) Is, "?"))...);
};
static constexpr auto dimensions_descriptor
= helper<decltype(make_index_sequence<Type::NumIndices>())>::value;
template <typename... Args>
static Type *alloc(Args &&...args) {
return new Type(std::forward<Args>(args)...);
}
static void free(Type *tensor) { delete tensor; }
};
template <typename Scalar_, typename std::ptrdiff_t... Indices, int Options_, typename IndexType>
struct eigen_tensor_helper<
Eigen::TensorFixedSize<Scalar_, Eigen::Sizes<Indices...>, Options_, IndexType>> {
using Type = Eigen::TensorFixedSize<Scalar_, Eigen::Sizes<Indices...>, Options_, IndexType>;
using ValidType = void;
static constexpr Eigen::DSizes<typename Type::Index, Type::NumIndices>
get_shape(const Type & /*f*/) {
return get_shape();
}
static constexpr Eigen::DSizes<typename Type::Index, Type::NumIndices> get_shape() {
return Eigen::DSizes<typename Type::Index, Type::NumIndices>(Indices...);
}
static bool
is_correct_shape(const Eigen::DSizes<typename Type::Index, Type::NumIndices> &shape) {
return get_shape() == shape;
}
static constexpr auto dimensions_descriptor
= ::pybind11::detail::concat(const_name<Indices>()...);
template <typename... Args>
static Type *alloc(Args &&...args) {
Eigen::aligned_allocator<Type> allocator;
return ::new (allocator.allocate(1)) Type(std::forward<Args>(args)...);
}
static void free(Type *tensor) {
Eigen::aligned_allocator<Type> allocator;
tensor->~Type();
allocator.deallocate(tensor, 1);
}
};
template <typename Type, bool ShowDetails, bool NeedsWriteable = false>
struct get_tensor_descriptor {
static constexpr auto details
= const_name<NeedsWriteable>(", flags.writeable", "")
+ const_name<static_cast<int>(Type::Layout) == static_cast<int>(Eigen::RowMajor)>(
", flags.c_contiguous", ", flags.f_contiguous");
static constexpr auto value
= const_name("numpy.ndarray[") + npy_format_descriptor<typename Type::Scalar>::name
+ const_name("[") + eigen_tensor_helper<remove_cv_t<Type>>::dimensions_descriptor
+ const_name("]") + const_name<ShowDetails>(details, const_name("")) + const_name("]");
};
// When EIGEN_AVOID_STL_ARRAY is defined, Eigen::DSizes<T, 0> does not have the begin() member
// function. Falling back to a simple loop works around this issue.
//
// We need to disable the type-limits warning for the inner loop when size = 0.
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_GCC("-Wtype-limits")
template <typename T, int size>
std::vector<T> convert_dsizes_to_vector(const Eigen::DSizes<T, size> &arr) {
std::vector<T> result(size);
for (size_t i = 0; i < size; i++) {
result[i] = arr[i];
}
return result;
}
template <typename T, int size>
Eigen::DSizes<T, size> get_shape_for_array(const array &arr) {
Eigen::DSizes<T, size> result;
const T *shape = arr.shape();
for (size_t i = 0; i < size; i++) {
result[i] = shape[i];
}
return result;
}
PYBIND11_WARNING_POP
template <typename Type>
struct type_caster<Type, typename eigen_tensor_helper<Type>::ValidType> {
static_assert(!std::is_pointer<typename Type::Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
using Helper = eigen_tensor_helper<Type>;
static constexpr auto temp_name = get_tensor_descriptor<Type, false>::value;
PYBIND11_TYPE_CASTER(Type, temp_name);
bool load(handle src, bool convert) {
if (!convert) {
if (!isinstance<array>(src)) {
return false;
}
array temp = array::ensure(src);
if (!temp) {
return false;
}
if (!temp.dtype().is(dtype::of<typename Type::Scalar>())) {
return false;
}
}
array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()> arr(
reinterpret_borrow<object>(src));
if (arr.ndim() != Type::NumIndices) {
return false;
}
auto shape = get_shape_for_array<typename Type::Index, Type::NumIndices>(arr);
if (!Helper::is_correct_shape(shape)) {
return false;
}
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
auto data_pointer = arr.data();
#else
// Handle Eigen bug
auto data_pointer = const_cast<typename Type::Scalar *>(arr.data());
#endif
if (is_tensor_aligned(arr.data())) {
value = Eigen::TensorMap<const Type, Eigen::Aligned>(data_pointer, shape);
} else {
value = Eigen::TensorMap<const Type>(data_pointer, shape);
}
return true;
}
static handle cast(Type &&src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::reference
|| policy == return_value_policy::reference_internal) {
pybind11_fail("Cannot use a reference return value policy for an rvalue");
}
return cast_impl(&src, return_value_policy::move, parent);
}
static handle cast(const Type &&src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::reference
|| policy == return_value_policy::reference_internal) {
pybind11_fail("Cannot use a reference return value policy for an rvalue");
}
return cast_impl(&src, return_value_policy::move, parent);
}
static handle cast(Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
static handle cast(const Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
static handle cast(Type *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
static handle cast(const Type *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
template <typename C>
static handle cast_impl(C *src, return_value_policy policy, handle parent) {
object parent_object;
bool writeable = false;
switch (policy) {
case return_value_policy::move:
if (std::is_const<C>::value) {
pybind11_fail("Cannot move from a constant reference");
}
src = Helper::alloc(std::move(*src));
parent_object
= capsule(src, [](void *ptr) { Helper::free(reinterpret_cast<Type *>(ptr)); });
writeable = true;
break;
case return_value_policy::take_ownership:
if (std::is_const<C>::value) {
// This cast is ugly, and might be UB in some cases, but we don't have an
// alternative here as we must free that memory
Helper::free(const_cast<Type *>(src));
pybind11_fail("Cannot take ownership of a const reference");
}
parent_object
= capsule(src, [](void *ptr) { Helper::free(reinterpret_cast<Type *>(ptr)); });
writeable = true;
break;
case return_value_policy::copy:
writeable = true;
break;
case return_value_policy::reference:
parent_object = none();
writeable = !std::is_const<C>::value;
break;
case return_value_policy::reference_internal:
// Default should do the right thing
if (!parent) {
pybind11_fail("Cannot use reference internal when there is no parent");
}
parent_object = reinterpret_borrow<object>(parent);
writeable = !std::is_const<C>::value;
break;
default:
pybind11_fail("pybind11 bug in eigen.h, please file a bug report");
}
auto result = array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()>(
convert_dsizes_to_vector(Helper::get_shape(*src)), src->data(), parent_object);
if (!writeable) {
array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return result.release();
}
};
template <typename StoragePointerType,
bool needs_writeable,
enable_if_t<!needs_writeable, bool> = true>
StoragePointerType get_array_data_for_type(array &arr) {
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
return reinterpret_cast<StoragePointerType>(arr.data());
#else
// Handle Eigen bug (see above): pre-3.4 TensorMaps require a non-const data pointer
return reinterpret_cast<StoragePointerType>(const_cast<void *>(arr.data()));
#endif
}
template <typename StoragePointerType,
bool needs_writeable,
enable_if_t<needs_writeable, bool> = true>
StoragePointerType get_array_data_for_type(array &arr) {
return reinterpret_cast<StoragePointerType>(arr.mutable_data());
}
template <typename T, typename = void>
struct get_storage_pointer_type;
template <typename MapType>
struct get_storage_pointer_type<MapType, void_t<typename MapType::StoragePointerType>> {
using SPT = typename MapType::StoragePointerType;
};
template <typename MapType>
struct get_storage_pointer_type<MapType, void_t<typename MapType::PointerArgType>> {
using SPT = typename MapType::PointerArgType;
};
template <typename Type, int Options>
struct type_caster<Eigen::TensorMap<Type, Options>,
typename eigen_tensor_helper<remove_cv_t<Type>>::ValidType> {
static_assert(!std::is_pointer<typename Type::Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
using MapType = Eigen::TensorMap<Type, Options>;
using Helper = eigen_tensor_helper<remove_cv_t<Type>>;
bool load(handle src, bool /*convert*/) {
// Note that we have a lot more checks here as we want to make sure to avoid copies
if (!isinstance<array>(src)) {
return false;
}
auto arr = reinterpret_borrow<array>(src);
if ((arr.flags() & compute_array_flag_from_tensor<Type>()) == 0) {
return false;
}
if (!arr.dtype().is(dtype::of<typename Type::Scalar>())) {
return false;
}
if (arr.ndim() != Type::NumIndices) {
return false;
}
constexpr bool is_aligned = (Options & Eigen::Aligned) != 0;
if (is_aligned && !is_tensor_aligned(arr.data())) {
return false;
}
auto shape = get_shape_for_array<typename Type::Index, Type::NumIndices>(arr);
if (!Helper::is_correct_shape(shape)) {
return false;
}
if (needs_writeable && !arr.writeable()) {
return false;
}
auto result = get_array_data_for_type<typename get_storage_pointer_type<MapType>::SPT,
needs_writeable>(arr);
value.reset(new MapType(std::move(result), std::move(shape)));
return true;
}
static handle cast(MapType &&src, return_value_policy policy, handle parent) {
return cast_impl(&src, policy, parent);
}
static handle cast(const MapType &&src, return_value_policy policy, handle parent) {
return cast_impl(&src, policy, parent);
}
static handle cast(MapType &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
static handle cast(const MapType &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
static handle cast(MapType *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
static handle cast(const MapType *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
template <typename C>
static handle cast_impl(C *src, return_value_policy policy, handle parent) {
object parent_object;
constexpr bool writeable = !std::is_const<C>::value;
switch (policy) {
case return_value_policy::reference:
parent_object = none();
break;
case return_value_policy::reference_internal:
// Default should do the right thing
if (!parent) {
pybind11_fail("Cannot use reference internal when there is no parent");
}
parent_object = reinterpret_borrow<object>(parent);
break;
default:
// move, take_ownership don't make any sense for a ref/map:
pybind11_fail("Invalid return_value_policy for Eigen Map type, must be either "
"reference or reference_internal");
}
auto result = array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()>(
convert_dsizes_to_vector(Helper::get_shape(*src)),
src->data(),
std::move(parent_object));
if (!writeable) {
array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return result.release();
}
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
static constexpr bool needs_writeable = !std::is_const<typename std::remove_pointer<
typename get_storage_pointer_type<MapType>::SPT>::type>::value;
#else
// Handle Eigen bug: pre-3.4, StoragePointerType does not reflect constness, so fall back to the constness of Type itself
static constexpr bool needs_writeable = !std::is_const<Type>::value;
#endif
protected:
// TODO: Move to std::optional once std::optional has more support
std::unique_ptr<MapType> value;
public:
static constexpr auto name = get_tensor_descriptor<Type, true, needs_writeable>::value;
explicit operator MapType *() { return value.get(); }
explicit operator MapType &() { return *value; }
explicit operator MapType &&() && { return std::move(*value); }
template <typename T_>
using cast_op_type = ::pybind11::detail::movable_cast_op_type<T_>;
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
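A minimal usage sketch for the tensor casters above (module and function names are illustrative, not part of the header): `Eigen::Tensor` arguments and return values are copied through NumPy arrays, while `Eigen::TensorMap` arguments bind to the array's memory without a copy.
```cpp
#include <pybind11/pybind11.h>
#include <pybind11/eigen/tensor.h>

namespace py = pybind11;

// Returned by value: the caster above moves the result into a new NumPy array.
// RowMajor matches NumPy's default C-contiguous layout.
Eigen::Tensor<double, 3, Eigen::RowMajor>
make_ones(Eigen::Index a, Eigen::Index b, Eigen::Index c) {
    Eigen::Tensor<double, 3, Eigen::RowMajor> t(a, b, c);
    t.setConstant(1.0);
    return t;
}

// Taken as a TensorMap: no copy, but load() above rejects arrays whose dtype,
// rank, layout, or writeability do not match.
double total(Eigen::TensorMap<Eigen::Tensor<double, 3, Eigen::RowMajor>> &t) {
    Eigen::Tensor<double, 0, Eigen::RowMajor> s = t.sum();
    return s();
}

PYBIND11_MODULE(tensor_demo, m) {
    m.def("make_ones", &make_ones);
    m.def("total", &total);
}
```
From Python, `tensor_demo.total(tensor_demo.make_ones(2, 3, 4))` returns `24.0`.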
|
======================================================================================================================
SOURCE CODE FILE: embed.h
LINES: 1
SIZE: 13.35 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\embed.h
ENCODING: utf-8
```h
/*
pybind11/embed.h: Support for embedding the interpreter
Copyright (c) 2017 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include "eval.h"
#include <memory>
#include <vector>
#if defined(PYPY_VERSION)
# error Embedding the interpreter is not supported with PyPy
#endif
#define PYBIND11_EMBEDDED_MODULE_IMPL(name) \
extern "C" PyObject *pybind11_init_impl_##name(); \
extern "C" PyObject *pybind11_init_impl_##name() { return pybind11_init_wrapper_##name(); }
/** \rst
Add a new module to the table of builtins for the interpreter. Must be
defined in global scope. The first macro parameter is the name of the
module (without quotes). The second parameter is the variable which will
be used as the interface to add functions and classes to the module.
.. code-block:: cpp
PYBIND11_EMBEDDED_MODULE(example, m) {
// ... initialize functions and classes here
m.def("foo", []() {
return "Hello, World!";
});
}
\endrst */
#define PYBIND11_EMBEDDED_MODULE(name, variable) \
static ::pybind11::module_::module_def PYBIND11_CONCAT(pybind11_module_def_, name); \
static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &); \
static PyObject PYBIND11_CONCAT(*pybind11_init_wrapper_, name)() { \
auto m = ::pybind11::module_::create_extension_module( \
PYBIND11_TOSTRING(name), nullptr, &PYBIND11_CONCAT(pybind11_module_def_, name)); \
try { \
PYBIND11_CONCAT(pybind11_init_, name)(m); \
return m.ptr(); \
} \
PYBIND11_CATCH_INIT_EXCEPTIONS \
} \
PYBIND11_EMBEDDED_MODULE_IMPL(name) \
::pybind11::detail::embedded_module PYBIND11_CONCAT(pybind11_module_, name)( \
PYBIND11_TOSTRING(name), PYBIND11_CONCAT(pybind11_init_impl_, name)); \
void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ \
& variable) // NOLINT(bugprone-macro-parentheses)
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// Python 2.7/3.x compatible version of `PyImport_AppendInittab`, with error checks.
struct embedded_module {
using init_t = PyObject *(*) ();
embedded_module(const char *name, init_t init) {
if (Py_IsInitialized() != 0) {
pybind11_fail("Can't add new modules after the interpreter has been initialized");
}
auto result = PyImport_AppendInittab(name, init);
if (result == -1) {
pybind11_fail("Insufficient memory to add a new module");
}
}
};
struct wide_char_arg_deleter {
void operator()(wchar_t *ptr) const {
// API docs: https://docs.python.org/3/c-api/sys.html#c.Py_DecodeLocale
PyMem_RawFree(ptr);
}
};
inline wchar_t *widen_chars(const char *safe_arg) {
wchar_t *widened_arg = Py_DecodeLocale(safe_arg, nullptr);
return widened_arg;
}
inline void precheck_interpreter() {
if (Py_IsInitialized() != 0) {
pybind11_fail("The interpreter is already running");
}
}
#if !defined(PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX)
# define PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX (0x03080000)
#endif
#if PY_VERSION_HEX < PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX
inline void initialize_interpreter_pre_pyconfig(bool init_signal_handlers,
int argc,
const char *const *argv,
bool add_program_dir_to_path) {
detail::precheck_interpreter();
Py_InitializeEx(init_signal_handlers ? 1 : 0);
// Before it was special-cased in Python 3.8, passing an empty or null argv
// caused a segfault, so we have to reimplement the special case ourselves.
bool special_case = (argv == nullptr || argc <= 0);
const char *const empty_argv[]{"\0"};
const char *const *safe_argv = special_case ? empty_argv : argv;
if (special_case) {
argc = 1;
}
auto argv_size = static_cast<size_t>(argc);
// SetArgv* on python 3 takes wchar_t, so we have to convert.
std::unique_ptr<wchar_t *[]> widened_argv(new wchar_t *[argv_size]);
std::vector<std::unique_ptr<wchar_t[], detail::wide_char_arg_deleter>> widened_argv_entries;
widened_argv_entries.reserve(argv_size);
for (size_t ii = 0; ii < argv_size; ++ii) {
widened_argv_entries.emplace_back(detail::widen_chars(safe_argv[ii]));
if (!widened_argv_entries.back()) {
// A null here indicates a character-encoding failure or the Python
// interpreter running out of memory. Give up.
return;
}
widened_argv[ii] = widened_argv_entries.back().get();
}
auto *pysys_argv = widened_argv.get();
PySys_SetArgvEx(argc, pysys_argv, static_cast<int>(add_program_dir_to_path));
}
#endif
PYBIND11_NAMESPACE_END(detail)
#if PY_VERSION_HEX >= PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX
inline void initialize_interpreter(PyConfig *config,
int argc = 0,
const char *const *argv = nullptr,
bool add_program_dir_to_path = true) {
detail::precheck_interpreter();
PyStatus status = PyConfig_SetBytesArgv(config, argc, const_cast<char *const *>(argv));
if (PyStatus_Exception(status) != 0) {
// A failure here indicates a character-encoding failure or the Python
// interpreter running out of memory. Give up.
PyConfig_Clear(config);
throw std::runtime_error(PyStatus_IsError(status) != 0 ? status.err_msg
: "Failed to prepare CPython");
}
status = Py_InitializeFromConfig(config);
if (PyStatus_Exception(status) != 0) {
PyConfig_Clear(config);
throw std::runtime_error(PyStatus_IsError(status) != 0 ? status.err_msg
: "Failed to init CPython");
}
if (add_program_dir_to_path) {
PyRun_SimpleString("import sys, os.path; "
"sys.path.insert(0, "
"os.path.abspath(os.path.dirname(sys.argv[0])) "
"if sys.argv and os.path.exists(sys.argv[0]) else '')");
}
PyConfig_Clear(config);
}
#endif
/** \rst
Initialize the Python interpreter. No other pybind11 or CPython API functions can be
called before this is done, with the exception of `PYBIND11_EMBEDDED_MODULE`. The
optional `init_signal_handlers` parameter can be used to skip the registration of
signal handlers (see the `Python documentation`_ for details). Calling this function
again after the interpreter has already been initialized is a fatal error.
If initializing the Python interpreter fails, then the program is terminated. (This
is controlled by the CPython runtime and is an exception to pybind11's normal behavior
of throwing exceptions on errors.)
The remaining optional parameters, `argc`, `argv`, and `add_program_dir_to_path` are
used to populate ``sys.argv`` and ``sys.path``.
See the |PySys_SetArgvEx documentation|_ for details.
.. _Python documentation: https://docs.python.org/3/c-api/init.html#c.Py_InitializeEx
.. |PySys_SetArgvEx documentation| replace:: ``PySys_SetArgvEx`` documentation
.. _PySys_SetArgvEx documentation: https://docs.python.org/3/c-api/init.html#c.PySys_SetArgvEx
\endrst */
inline void initialize_interpreter(bool init_signal_handlers = true,
int argc = 0,
const char *const *argv = nullptr,
bool add_program_dir_to_path = true) {
#if PY_VERSION_HEX < PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX
detail::initialize_interpreter_pre_pyconfig(
init_signal_handlers, argc, argv, add_program_dir_to_path);
#else
PyConfig config;
PyConfig_InitPythonConfig(&config);
// See PR #4473 for background
config.parse_argv = 0;
config.install_signal_handlers = init_signal_handlers ? 1 : 0;
initialize_interpreter(&config, argc, argv, add_program_dir_to_path);
#endif
}
/** \rst
Shut down the Python interpreter. No pybind11 or CPython API functions can be called
after this. In addition, pybind11 objects must not outlive the interpreter:
.. code-block:: cpp
{ // BAD
py::initialize_interpreter();
auto hello = py::str("Hello, World!");
py::finalize_interpreter();
} // <-- BOOM, hello's destructor is called after interpreter shutdown
{ // GOOD
py::initialize_interpreter();
{ // scoped
auto hello = py::str("Hello, World!");
} // <-- OK, hello is cleaned up properly
py::finalize_interpreter();
}
{ // BETTER
py::scoped_interpreter guard{};
auto hello = py::str("Hello, World!");
}
.. warning::
The interpreter can be restarted by calling `initialize_interpreter` again.
Modules created using pybind11 can be safely re-initialized. However, Python
itself cannot completely unload binary extension modules and there are several
caveats with regard to interpreter restarting. All the details can be found
in the CPython documentation. In short, not all interpreter memory may be
freed, either due to reference cycles or user-created global data.
\endrst */
inline void finalize_interpreter() {
// Get the internals pointer (without creating it if it doesn't exist). It's possible for the
// internals to be created during Py_Finalize() (e.g. if a py::capsule calls `get_internals()`
// during destruction), so we get the pointer-pointer here and check it after Py_Finalize().
detail::internals **internals_ptr_ptr = detail::get_internals_pp();
// It could also be stashed in state_dict, so look there too:
if (object internals_obj
= get_internals_obj_from_state_dict(detail::get_python_state_dict())) {
internals_ptr_ptr = detail::get_internals_pp_from_capsule(internals_obj);
}
// Local internals contains data managed by the current interpreter, so we must clear them to
// avoid undefined behaviors when initializing another interpreter
detail::get_local_internals().registered_types_cpp.clear();
detail::get_local_internals().registered_exception_translators.clear();
Py_Finalize();
if (internals_ptr_ptr) {
delete *internals_ptr_ptr;
*internals_ptr_ptr = nullptr;
}
}
/** \rst
Scope guard version of `initialize_interpreter` and `finalize_interpreter`.
This is a move-only guard; only a single instance can exist.
See `initialize_interpreter` for a discussion of its constructor arguments.
.. code-block:: cpp
#include <pybind11/embed.h>
int main() {
py::scoped_interpreter guard{};
py::print("Hello, World!");
} // <-- interpreter shutdown
\endrst */
class scoped_interpreter {
public:
explicit scoped_interpreter(bool init_signal_handlers = true,
int argc = 0,
const char *const *argv = nullptr,
bool add_program_dir_to_path = true) {
initialize_interpreter(init_signal_handlers, argc, argv, add_program_dir_to_path);
}
#if PY_VERSION_HEX >= PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX
explicit scoped_interpreter(PyConfig *config,
int argc = 0,
const char *const *argv = nullptr,
bool add_program_dir_to_path = true) {
initialize_interpreter(config, argc, argv, add_program_dir_to_path);
}
#endif
scoped_interpreter(const scoped_interpreter &) = delete;
scoped_interpreter(scoped_interpreter &&other) noexcept { other.is_valid = false; }
scoped_interpreter &operator=(const scoped_interpreter &) = delete;
scoped_interpreter &operator=(scoped_interpreter &&) = delete;
~scoped_interpreter() {
if (is_valid) {
finalize_interpreter();
}
}
private:
bool is_valid = true;
};
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
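A compilable usage sketch of the pieces above (the module name `fast_calc` is illustrative): the embedded module must be declared before the interpreter starts, and `scoped_interpreter` ties the interpreter's lifetime to a scope.
```cpp
#include <pybind11/embed.h>

namespace py = pybind11;

// Registered in the builtins table; must appear at global scope, before the
// interpreter is initialized.
PYBIND11_EMBEDDED_MODULE(fast_calc, m) {
    m.def("add", [](int i, int j) { return i + j; });
}

int main() {
    py::scoped_interpreter guard{}; // start the interpreter, keep it alive

    py::module_ calc = py::module_::import("fast_calc");
    int n = calc.attr("add")(1, 2).cast<int>();
    py::print("1 + 2 =", n);
} // guard's destructor calls finalize_interpreter()
```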
|
=====================================================================================================================
SOURCE CODE FILE: eval.h
LINES: 3
SIZE: 4.77 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\eval.h
ENCODING: utf-8
```h
/*
pybind11/eval.h: Support for evaluating Python expressions and statements
from strings and files
Copyright (c) 2016 Klemens Morgenstern <[email protected]> and
Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include <utility>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
inline void ensure_builtins_in_globals(object &global) {
#if defined(PYPY_VERSION) || PY_VERSION_HEX < 0x03080000
// Running exec and eval adds the `builtins` module under the `__builtins__`
// key to globals if not yet present. Python 3.8 made PyRun_String behave
// similarly. Let's also do that for older versions, for consistency. This
// was missing from PyPy3.8 7.3.7.
if (!global.contains("__builtins__"))
global["__builtins__"] = module_::import(PYBIND11_BUILTINS_MODULE);
#else
(void) global;
#endif
}
PYBIND11_NAMESPACE_END(detail)
enum eval_mode {
/// Evaluate a string containing an isolated expression
eval_expr,
/// Evaluate a string containing a single statement. Returns \c none
eval_single_statement,
/// Evaluate a string containing a sequence of statements. Returns \c none
eval_statements
};
template <eval_mode mode = eval_expr>
object eval(const str &expr, object global = globals(), object local = object()) {
if (!local) {
local = global;
}
detail::ensure_builtins_in_globals(global);
/* PyRun_String does not accept a PyObject / encoding specifier,
this seems to be the only alternative */
std::string buffer = "# -*- coding: utf-8 -*-\n" + (std::string) expr;
int start = 0;
switch (mode) {
case eval_expr:
start = Py_eval_input;
break;
case eval_single_statement:
start = Py_single_input;
break;
case eval_statements:
start = Py_file_input;
break;
default:
pybind11_fail("invalid evaluation mode");
}
PyObject *result = PyRun_String(buffer.c_str(), start, global.ptr(), local.ptr());
if (!result) {
throw error_already_set();
}
return reinterpret_steal<object>(result);
}
template <eval_mode mode = eval_expr, size_t N>
object eval(const char (&s)[N], object global = globals(), object local = object()) {
/* Support raw string literals by removing common leading whitespace */
auto expr = (s[0] == '\n') ? str(module_::import("textwrap").attr("dedent")(s)) : str(s);
return eval<mode>(expr, std::move(global), std::move(local));
}
inline void exec(const str &expr, object global = globals(), object local = object()) {
eval<eval_statements>(expr, std::move(global), std::move(local));
}
template <size_t N>
void exec(const char (&s)[N], object global = globals(), object local = object()) {
eval<eval_statements>(s, std::move(global), std::move(local));
}
#if defined(PYPY_VERSION)
template <eval_mode mode = eval_statements>
object eval_file(str, object, object) {
pybind11_fail("eval_file not supported in PyPy3. Use eval");
}
template <eval_mode mode = eval_statements>
object eval_file(str, object) {
pybind11_fail("eval_file not supported in PyPy3. Use eval");
}
template <eval_mode mode = eval_statements>
object eval_file(str) {
pybind11_fail("eval_file not supported in PyPy3. Use eval");
}
#else
template <eval_mode mode = eval_statements>
object eval_file(str fname, object global = globals(), object local = object()) {
if (!local) {
local = global;
}
detail::ensure_builtins_in_globals(global);
int start = 0;
switch (mode) {
case eval_expr:
start = Py_eval_input;
break;
case eval_single_statement:
start = Py_single_input;
break;
case eval_statements:
start = Py_file_input;
break;
default:
pybind11_fail("invalid evaluation mode");
}
int closeFile = 1;
std::string fname_str = (std::string) fname;
FILE *f = _Py_fopen_obj(fname.ptr(), "r");
if (!f) {
PyErr_Clear();
pybind11_fail("File \"" + fname_str + "\" could not be opened!");
}
if (!global.contains("__file__")) {
global["__file__"] = std::move(fname);
}
PyObject *result
= PyRun_FileEx(f, fname_str.c_str(), start, global.ptr(), local.ptr(), closeFile);
if (!result) {
throw error_already_set();
}
return reinterpret_steal<object>(result);
}
#endif
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
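A usage sketch of the evaluation modes above, assuming an embedded interpreter (see `embed.h`); names such as `seed` are illustrative.
```cpp
#include <pybind11/embed.h>
#include <pybind11/eval.h>

namespace py = pybind11;
using namespace py::literals;

int main() {
    py::scoped_interpreter guard{};

    // eval_expr (the default): evaluate a single expression and get its value.
    int four = py::eval("2 + 2").cast<int>();

    // eval_statements via exec(): run statements for their side effects.
    // The leading newline selects the textwrap.dedent() path shown above.
    auto locals = py::dict("seed"_a = four);
    py::exec(R"(
        doubled = seed * 2
    )",
             py::globals(),
             locals);

    py::print("doubled =", locals["doubled"]); // prints "doubled = 8"
}
```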
|
===========================================================================================================================
SOURCE CODE FILE: functional.h
LINES: 1
SIZE: 5.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\functional.h
ENCODING: utf-8
```h
/*
pybind11/functional.h: std::function<> support
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#define PYBIND11_HAS_TYPE_CASTER_STD_FUNCTION_SPECIALIZATIONS
#include "pybind11.h"
#include <functional>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
PYBIND11_NAMESPACE_BEGIN(type_caster_std_function_specializations)
// ensure GIL is held during functor destruction
struct func_handle {
function f;
#if !(defined(_MSC_VER) && _MSC_VER == 1916 && defined(PYBIND11_CPP17))
// This triggers a syntax error under very special conditions (very weird indeed).
explicit
#endif
func_handle(function &&f_) noexcept
: f(std::move(f_)) {
}
func_handle(const func_handle &f_) { operator=(f_); }
func_handle &operator=(const func_handle &f_) {
gil_scoped_acquire acq;
f = f_.f;
return *this;
}
~func_handle() {
gil_scoped_acquire acq;
function kill_f(std::move(f));
}
};
// to emulate 'move initialization capture' in C++11
struct func_wrapper_base {
func_handle hfunc;
explicit func_wrapper_base(func_handle &&hf) noexcept : hfunc(hf) {}
};
template <typename Return, typename... Args>
struct func_wrapper : func_wrapper_base {
using func_wrapper_base::func_wrapper_base;
Return operator()(Args... args) const {
gil_scoped_acquire acq;
// casts the returned object as a rvalue to the return type
return hfunc.f(std::forward<Args>(args)...).template cast<Return>();
}
};
PYBIND11_NAMESPACE_END(type_caster_std_function_specializations)
template <typename Return, typename... Args>
struct type_caster<std::function<Return(Args...)>> {
using type = std::function<Return(Args...)>;
using retval_type = conditional_t<std::is_same<Return, void>::value, void_type, Return>;
using function_type = Return (*)(Args...);
public:
bool load(handle src, bool convert) {
if (src.is_none()) {
// Defer accepting None to other overloads (if we aren't in convert mode):
if (!convert) {
return false;
}
return true;
}
if (!isinstance<function>(src)) {
return false;
}
auto func = reinterpret_borrow<function>(src);
/*
When passing a C++ function as an argument to another C++
function via Python, every function call would normally involve
a full C++ -> Python -> C++ roundtrip, which can be prohibitive.
Here, we try to at least detect the case where the function is
stateless (i.e. function pointer or lambda function without
captured variables), in which case the roundtrip can be avoided.
*/
if (auto cfunc = func.cpp_function()) {
auto *cfunc_self = PyCFunction_GET_SELF(cfunc.ptr());
if (cfunc_self == nullptr) {
PyErr_Clear();
} else if (isinstance<capsule>(cfunc_self)) {
auto c = reinterpret_borrow<capsule>(cfunc_self);
function_record *rec = nullptr;
// Check that we can safely reinterpret the capsule into a function_record
if (detail::is_function_record_capsule(c)) {
rec = c.get_pointer<function_record>();
}
while (rec != nullptr) {
if (rec->is_stateless
&& same_type(typeid(function_type),
*reinterpret_cast<const std::type_info *>(rec->data[1]))) {
struct capture {
function_type f;
};
value = ((capture *) &rec->data)->f;
return true;
}
rec = rec->next;
}
}
// PyPy segfaults here when passing a builtin function like sum.
// Raising a fail exception here prevents the segfault, but only on gcc.
// See PR #1413 for full details
}
value = type_caster_std_function_specializations::func_wrapper<Return, Args...>(
type_caster_std_function_specializations::func_handle(std::move(func)));
return true;
}
template <typename Func>
static handle cast(Func &&f_, return_value_policy policy, handle /* parent */) {
if (!f_) {
return none().release();
}
auto result = f_.template target<function_type>();
if (result) {
return cpp_function(*result, policy).release();
}
return cpp_function(std::forward<Func>(f_), policy).release();
}
PYBIND11_TYPE_CASTER(type,
const_name("Callable[[")
+ ::pybind11::detail::concat(make_caster<Args>::name...)
+ const_name("], ") + make_caster<retval_type>::name
+ const_name("]"));
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
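A usage sketch for the caster above (module and function names are illustrative): any Python callable converts to `std::function`, and a stateless C++ callable that round-trips through Python is unwrapped without the Python detour.
```cpp
#include <pybind11/pybind11.h>
#include <pybind11/functional.h>

#include <functional>

namespace py = pybind11;

int apply_twice(const std::function<int(int)> &f, int x) { return f(f(x)); }

PYBIND11_MODULE(callbacks_demo, m) {
    m.def("apply_twice", &apply_twice);
    // A captureless lambda is stateless: if Python passes it back into
    // apply_twice, the caster detects this and skips the Python round trip.
    m.def("increment", [](int x) { return x + 1; });
}
// Python: callbacks_demo.apply_twice(lambda v: v * 3, 2)           -> 18
// Python: callbacks_demo.apply_twice(callbacks_demo.increment, 5)  -> 7
```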
|
====================================================================================================================
SOURCE CODE FILE: gil.h
LINES: 1
SIZE: 7.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\gil.h
ENCODING: utf-8
```h
/*
pybind11/gil.h: RAII helpers for managing the GIL
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include <cassert>
#if !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
# include "detail/internals.h"
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// forward declarations
PyThreadState *get_thread_state_unchecked();
PYBIND11_NAMESPACE_END(detail)
#if !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
/* The functions below essentially reproduce the PyGILState_* API using a RAII
* pattern, but there are a few important differences:
*
* 1. When acquiring the GIL from a non-main thread during the finalization
* phase, the GILState API blindly terminates the calling thread, which
* is often not what is wanted. This API does not do this.
*
* 2. The gil_scoped_release class can optionally sever the association
* between a PyThreadState and its thread, which allows moving it to
* another thread (this is a fairly rare/advanced use case).
*
* 3. The reference count of an acquired thread state can be controlled. This
* can be handy to prevent cases where callbacks issued from an external
* thread would otherwise constantly construct and destroy thread state data
* structures.
*
* See the Python bindings of NanoGUI (http://github.com/wjakob/nanogui) for an
* example which uses features 2 and 3 to migrate the Python thread of
* execution to another thread (to run the event loop on the original thread,
* in this case).
*/
class gil_scoped_acquire {
public:
PYBIND11_NOINLINE gil_scoped_acquire() {
auto &internals = detail::get_internals();
tstate = (PyThreadState *) PYBIND11_TLS_GET_VALUE(internals.tstate);
if (!tstate) {
/* Check if the GIL was acquired using the PyGILState_* API instead (e.g. if
calling from a Python thread). Since we use a different key, this ensures
we don't create a new thread state and deadlock in PyEval_AcquireThread
below. Note we don't save this state with internals.tstate, since we don't
create it we would fail to clear it (its reference count should be > 0). */
tstate = PyGILState_GetThisThreadState();
}
if (!tstate) {
tstate = PyThreadState_New(internals.istate);
# if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
if (!tstate) {
pybind11_fail("scoped_acquire: could not create thread state!");
}
# endif
tstate->gilstate_counter = 0;
PYBIND11_TLS_REPLACE_VALUE(internals.tstate, tstate);
} else {
release = detail::get_thread_state_unchecked() != tstate;
}
if (release) {
PyEval_AcquireThread(tstate);
}
inc_ref();
}
gil_scoped_acquire(const gil_scoped_acquire &) = delete;
gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete;
void inc_ref() { ++tstate->gilstate_counter; }
PYBIND11_NOINLINE void dec_ref() {
--tstate->gilstate_counter;
# if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
if (detail::get_thread_state_unchecked() != tstate) {
pybind11_fail("scoped_acquire::dec_ref(): thread state must be current!");
}
if (tstate->gilstate_counter < 0) {
pybind11_fail("scoped_acquire::dec_ref(): reference count underflow!");
}
# endif
if (tstate->gilstate_counter == 0) {
# if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
if (!release) {
pybind11_fail("scoped_acquire::dec_ref(): internal error!");
}
# endif
PyThreadState_Clear(tstate);
if (active) {
PyThreadState_DeleteCurrent();
}
PYBIND11_TLS_DELETE_VALUE(detail::get_internals().tstate);
release = false;
}
}
/// This method disables the PyThreadState_DeleteCurrent call that otherwise
/// happens when the thread state's reference count drops to zero. Use it if
/// the interpreter could be shutting down when that happens, as thread
/// deletion is not allowed during shutdown. Check _Py_IsFinalizing() on
/// Python 3.7+, and protect subsequent code.
PYBIND11_NOINLINE void disarm() { active = false; }
PYBIND11_NOINLINE ~gil_scoped_acquire() {
dec_ref();
if (release) {
PyEval_SaveThread();
}
}
private:
PyThreadState *tstate = nullptr;
bool release = true;
bool active = true;
};
class gil_scoped_release {
public:
// PRECONDITION: The GIL must be held when this constructor is called.
explicit gil_scoped_release(bool disassoc = false) : disassoc(disassoc) {
assert(PyGILState_Check());
// `get_internals()` must be called here unconditionally in order to initialize
// `internals.tstate` for subsequent `gil_scoped_acquire` calls. Otherwise, an
// initialization race could occur as multiple threads try `gil_scoped_acquire`.
auto &internals = detail::get_internals();
// NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
tstate = PyEval_SaveThread();
if (disassoc) {
// Python >= 3.7 can remove this; the TLS key is an int before 3.7
// NOLINTNEXTLINE(readability-qualified-auto)
auto key = internals.tstate;
PYBIND11_TLS_DELETE_VALUE(key);
}
}
gil_scoped_release(const gil_scoped_release &) = delete;
gil_scoped_release &operator=(const gil_scoped_release &) = delete;
/// This method prevents the destructor from calling PyEval_RestoreThread, so
/// the GIL is not re-acquired. Use it if the interpreter could be shutting
/// down when the destructor runs, as restoring a thread state is not allowed
/// during shutdown. Check _Py_IsFinalizing() on Python 3.7+, and protect
/// subsequent code.
PYBIND11_NOINLINE void disarm() { active = false; }
~gil_scoped_release() {
if (!tstate) {
return;
}
// `PyEval_RestoreThread()` should not be called if runtime is finalizing
if (active) {
PyEval_RestoreThread(tstate);
}
if (disassoc) {
// Python >= 3.7 can remove this; the TLS key is an int before 3.7
// NOLINTNEXTLINE(readability-qualified-auto)
auto key = detail::get_internals().tstate;
PYBIND11_TLS_REPLACE_VALUE(key, tstate);
}
}
private:
PyThreadState *tstate;
bool disassoc;
bool active = true;
};
#else // PYBIND11_SIMPLE_GIL_MANAGEMENT
class gil_scoped_acquire {
PyGILState_STATE state;
public:
gil_scoped_acquire() : state{PyGILState_Ensure()} {}
gil_scoped_acquire(const gil_scoped_acquire &) = delete;
gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete;
~gil_scoped_acquire() { PyGILState_Release(state); }
void disarm() {}
};
class gil_scoped_release {
PyThreadState *state;
public:
// PRECONDITION: The GIL must be held when this constructor is called.
gil_scoped_release() {
assert(PyGILState_Check());
state = PyEval_SaveThread();
}
gil_scoped_release(const gil_scoped_release &) = delete;
gil_scoped_release &operator=(const gil_scoped_release &) = delete;
~gil_scoped_release() { PyEval_RestoreThread(state); }
void disarm() {}
};
#endif // PYBIND11_SIMPLE_GIL_MANAGEMENT
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
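A usage sketch for the guards above (the module name `gil_demo` is illustrative): release the GIL around CPU-bound C++ work, and re-acquire it before touching any Python API.
```cpp
#include <pybind11/pybind11.h>

#include <cstdint>

namespace py = pybind11;

// Pure C++ work: safe to run without holding the GIL.
std::uint64_t count_primes(std::uint64_t limit) {
    std::uint64_t n = 0;
    for (std::uint64_t i = 2; i < limit; ++i) {
        bool prime = true;
        for (std::uint64_t d = 2; d * d <= i; ++d) {
            if (i % d == 0) { prime = false; break; }
        }
        if (prime) ++n;
    }
    return n;
}

PYBIND11_MODULE(gil_demo, m) {
    m.def("count_primes", [](std::uint64_t limit) {
        py::gil_scoped_release release; // other Python threads can run now
        std::uint64_t n = count_primes(limit);
        {
            py::gil_scoped_acquire acquire; // required before any CPython call
            py::print("counted", n, "primes below", limit);
        }
        return n; // the GIL is held again once `release` goes out of scope
    });
    // The same effect, declaratively, when no Python API is used inside:
    // m.def("count_primes", &count_primes, py::call_guard<py::gil_scoped_release>());
}
```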
|
===================================================================================================================================
SOURCE CODE FILE: gil_safe_call_once.h
LINES: 1
SIZE: 4.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\gil_safe_call_once.h
ENCODING: utf-8
```h
// Copyright (c) 2023 The pybind Community.
#pragma once
#include "detail/common.h"
#include "gil.h"
#include <cassert>
#include <mutex>
#ifdef Py_GIL_DISABLED
# include <atomic>
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
// Use the `gil_safe_call_once_and_store` class below instead of the naive
//
// static auto imported_obj = py::module_::import("module_name"); // BAD, DO NOT USE!
//
// which has two serious issues:
//
// 1. Py_DECREF() calls potentially after the Python interpreter was finalized already, and
// 2. deadlocks in multi-threaded processes (because of missing lock ordering).
//
// The following alternative avoids both problems:
//
// PYBIND11_CONSTINIT static py::gil_safe_call_once_and_store<py::object> storage;
// auto &imported_obj = storage // Do NOT make this `static`!
// .call_once_and_store_result([]() {
// return py::module_::import("module_name");
// })
// .get_stored();
//
// The parameter of `call_once_and_store_result()` must be callable. It can make
// CPython API calls, and in particular, it can temporarily release the GIL.
//
// `T` can be any C++ type, it does not have to involve CPython API types.
//
// The behavior with regard to signals, e.g. `SIGINT` (`KeyboardInterrupt`),
// is not ideal. If the main thread is the one to actually run the `Callable`,
// then a `KeyboardInterrupt` will interrupt it if it is running normal Python
// code. The situation is different if a non-main thread runs the
// `Callable`, and then the main thread starts waiting for it to complete:
// a `KeyboardInterrupt` will not interrupt the non-main thread, but it will
// get processed only when it is the main thread's turn again and it is running
// normal Python code. However, this will be unnoticeable for quick call-once
// functions, which is usually the case.
template <typename T>
class gil_safe_call_once_and_store {
public:
// PRECONDITION: The GIL must be held when `call_once_and_store_result()` is called.
template <typename Callable>
gil_safe_call_once_and_store &call_once_and_store_result(Callable &&fn) {
if (!is_initialized_) { // This read is guarded by the GIL.
// Multiple threads may enter here, because the GIL is released in the next line and
// CPython API calls in the `fn()` call below may release and reacquire the GIL.
gil_scoped_release gil_rel; // Needed to establish lock ordering.
std::call_once(once_flag_, [&] {
// Only one thread will ever enter here.
gil_scoped_acquire gil_acq;
::new (storage_) T(fn()); // fn may release, but will reacquire, the GIL.
is_initialized_ = true; // This write is guarded by the GIL.
});
// All threads will observe `is_initialized_` as true here.
}
// Intentionally not returning `T &` to ensure the calling code is self-documenting.
return *this;
}
// This must only be called after `call_once_and_store_result()` was called.
T &get_stored() {
assert(is_initialized_);
PYBIND11_WARNING_PUSH
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 5
// Needed for gcc 4.8.5
PYBIND11_WARNING_DISABLE_GCC("-Wstrict-aliasing")
#endif
return *reinterpret_cast<T *>(storage_);
PYBIND11_WARNING_POP
}
constexpr gil_safe_call_once_and_store() = default;
PYBIND11_DTOR_CONSTEXPR ~gil_safe_call_once_and_store() = default;
private:
alignas(T) char storage_[sizeof(T)] = {};
std::once_flag once_flag_ = {};
#ifdef Py_GIL_DISABLED
std::atomic_bool
#else
bool
#endif
is_initialized_{false};
// The `is_initialized_`-`storage_` pair is very similar to `std::optional`,
// but the latter does not have the triviality properties of the former;
// therefore `std::optional` is not a viable alternative here.
};
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
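A usage sketch following the pattern documented in the header comment above (caching `json.dumps` is an illustrative choice):
```cpp
#include <pybind11/pybind11.h>
#include <pybind11/gil_safe_call_once.h>

namespace py = pybind11;

// Imports json.dumps exactly once, GIL- and thread-safely, and caches it
// without a `static py::object` (which would be destroyed too late).
const py::object &get_json_dumps() {
    PYBIND11_CONSTINIT static py::gil_safe_call_once_and_store<py::object> storage;
    return storage
        .call_once_and_store_result(
            []() { return py::module_::import("json").attr("dumps"); })
        .get_stored();
}

PYBIND11_MODULE(call_once_demo, m) {
    m.def("to_json", [](const py::dict &d) { return get_json_dumps()(d); });
}
```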
|
=========================================================================================================================
SOURCE CODE FILE: iostream.h
LINES: 1
SIZE: 8.91 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\iostream.h
ENCODING: utf-8
```h
/*
pybind11/iostream.h -- Tools to assist with redirecting cout and cerr to Python
Copyright (c) 2017 Henry F. Schreiner
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
WARNING: The implementation in this file is NOT thread safe. Multiple
threads writing to a redirected ostream concurrently cause data races
and potentially buffer overflows. Therefore it is currently a requirement
that all (possibly) concurrent redirected ostream writes are protected by
a mutex.
#HelpAppreciated: Work on iostream.h thread safety.
For more background see the discussions under
https://github.com/pybind/pybind11/pull/2982 and
https://github.com/pybind/pybind11/pull/2995.
*/
#pragma once
#include "pybind11.h"
#include <algorithm>
#include <cstring>
#include <iostream>
#include <iterator>
#include <memory>
#include <ostream>
#include <streambuf>
#include <string>
#include <utility>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Buffer that writes to Python instead of C++
class pythonbuf : public std::streambuf {
private:
using traits_type = std::streambuf::traits_type;
const size_t buf_size;
std::unique_ptr<char[]> d_buffer;
object pywrite;
object pyflush;
int overflow(int c) override {
if (!traits_type::eq_int_type(c, traits_type::eof())) {
*pptr() = traits_type::to_char_type(c);
pbump(1);
}
return sync() == 0 ? traits_type::not_eof(c) : traits_type::eof();
}
// Computes how many bytes at the end of the buffer are part of an
// incomplete sequence of UTF-8 bytes.
// Precondition: pbase() < pptr()
size_t utf8_remainder() const {
const auto rbase = std::reverse_iterator<char *>(pbase());
const auto rpptr = std::reverse_iterator<char *>(pptr());
auto is_ascii = [](char c) { return (static_cast<unsigned char>(c) & 0x80) == 0x00; };
auto is_leading = [](char c) { return (static_cast<unsigned char>(c) & 0xC0) == 0xC0; };
auto is_leading_2b = [](char c) { return static_cast<unsigned char>(c) <= 0xDF; };
auto is_leading_3b = [](char c) { return static_cast<unsigned char>(c) <= 0xEF; };
// If the last character is ASCII, there are no incomplete code points
if (is_ascii(*rpptr)) {
return 0;
}
// Otherwise, work back from the end of the buffer and find the first
// UTF-8 leading byte
const auto rpend = rbase - rpptr >= 3 ? rpptr + 3 : rbase;
const auto leading = std::find_if(rpptr, rpend, is_leading);
if (leading == rbase) {
return 0;
}
const auto dist = static_cast<size_t>(leading - rpptr);
size_t remainder = 0;
if (dist == 0) {
remainder = 1; // 1-byte code point is impossible
} else if (dist == 1) {
remainder = is_leading_2b(*leading) ? 0 : dist + 1;
} else if (dist == 2) {
remainder = is_leading_3b(*leading) ? 0 : dist + 1;
}
// else if (dist >= 3), at least 4 bytes before encountering an UTF-8
// leading byte, either no remainder or invalid UTF-8.
// Invalid UTF-8 will cause an exception later when converting
// to a Python string, so that's not handled here.
return remainder;
}
// This function must be non-virtual to be called in a destructor.
int _sync() {
if (pbase() != pptr()) { // If buffer is not empty
gil_scoped_acquire tmp;
// This subtraction cannot be negative, so dropping the sign.
auto size = static_cast<size_t>(pptr() - pbase());
size_t remainder = utf8_remainder();
if (size > remainder) {
str line(pbase(), size - remainder);
pywrite(std::move(line));
pyflush();
}
// Copy the remainder at the end of the buffer to the beginning:
if (remainder > 0) {
std::memmove(pbase(), pptr() - remainder, remainder);
}
setp(pbase(), epptr());
pbump(static_cast<int>(remainder));
}
return 0;
}
int sync() override { return _sync(); }
public:
explicit pythonbuf(const object &pyostream, size_t buffer_size = 1024)
: buf_size(buffer_size), d_buffer(new char[buf_size]), pywrite(pyostream.attr("write")),
pyflush(pyostream.attr("flush")) {
setp(d_buffer.get(), d_buffer.get() + buf_size - 1);
}
pythonbuf(pythonbuf &&) = default;
/// Sync before destroy
~pythonbuf() override { _sync(); }
};
PYBIND11_NAMESPACE_END(detail)
/** \rst
This is a move-only guard that redirects output.
.. code-block:: cpp
#include <pybind11/iostream.h>
...
{
py::scoped_ostream_redirect output;
std::cout << "Hello, World!"; // Python stdout
} // <-- return std::cout to normal
You can explicitly pass the C++ stream and the Python object,
for example to redirect stderr instead.
.. code-block:: cpp
{
py::scoped_ostream_redirect output{
std::cerr, py::module::import("sys").attr("stderr")};
std::cout << "Hello, World!";
}
\endrst */
class scoped_ostream_redirect {
protected:
std::streambuf *old;
std::ostream &costream;
detail::pythonbuf buffer;
public:
explicit scoped_ostream_redirect(std::ostream &costream = std::cout,
const object &pyostream
= module_::import("sys").attr("stdout"))
: costream(costream), buffer(pyostream) {
old = costream.rdbuf(&buffer);
}
~scoped_ostream_redirect() { costream.rdbuf(old); }
scoped_ostream_redirect(const scoped_ostream_redirect &) = delete;
scoped_ostream_redirect(scoped_ostream_redirect &&other) = default;
scoped_ostream_redirect &operator=(const scoped_ostream_redirect &) = delete;
scoped_ostream_redirect &operator=(scoped_ostream_redirect &&) = delete;
};
/** \rst
Like `scoped_ostream_redirect`, but redirects cerr by default. This class
is provided primarily to make ``py::call_guard`` easier to use.
.. code-block:: cpp
m.def("noisy_func", &noisy_func,
py::call_guard<scoped_ostream_redirect,
scoped_estream_redirect>());
\endrst */
class scoped_estream_redirect : public scoped_ostream_redirect {
public:
explicit scoped_estream_redirect(std::ostream &costream = std::cerr,
const object &pyostream
= module_::import("sys").attr("stderr"))
: scoped_ostream_redirect(costream, pyostream) {}
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Class to redirect output as a context manager. C++ backend.
class OstreamRedirect {
bool do_stdout_;
bool do_stderr_;
std::unique_ptr<scoped_ostream_redirect> redirect_stdout;
std::unique_ptr<scoped_estream_redirect> redirect_stderr;
public:
explicit OstreamRedirect(bool do_stdout = true, bool do_stderr = true)
: do_stdout_(do_stdout), do_stderr_(do_stderr) {}
void enter() {
if (do_stdout_) {
redirect_stdout.reset(new scoped_ostream_redirect());
}
if (do_stderr_) {
redirect_stderr.reset(new scoped_estream_redirect());
}
}
void exit() {
redirect_stdout.reset();
redirect_stderr.reset();
}
};
PYBIND11_NAMESPACE_END(detail)
/** \rst
This is a helper function to add a C++ redirect context manager to Python
instead of using a C++ guard. To use it, add the following to your binding code:
.. code-block:: cpp
#include <pybind11/iostream.h>
...
py::add_ostream_redirect(m, "ostream_redirect");
You now have a Python context manager that redirects your output:
.. code-block:: python
with m.ostream_redirect():
m.print_to_cout_function()
This manager can optionally be told which streams to operate on:
.. code-block:: python
with m.ostream_redirect(stdout=True, stderr=True):
m.noisy_function_with_error_printing()
\endrst */
inline class_<detail::OstreamRedirect>
add_ostream_redirect(module_ m, const std::string &name = "ostream_redirect") {
return class_<detail::OstreamRedirect>(std::move(m), name.c_str(), module_local())
.def(init<bool, bool>(), arg("stdout") = true, arg("stderr") = true)
.def("__enter__", &detail::OstreamRedirect::enter)
.def("__exit__", [](detail::OstreamRedirect &self_, const args &) { self_.exit(); });
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
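A usage sketch for the redirect helpers above (module and function names are illustrative): wrap every call with a C++ guard, or hand Python a context manager to opt in per call site.
```cpp
#include <pybind11/pybind11.h>
#include <pybind11/iostream.h>

#include <iostream>

namespace py = pybind11;

void noisy(int n) {
    for (int i = 0; i < n; ++i) {
        std::cout << "step " << i << '\n';
    }
}

PYBIND11_MODULE(redirect_demo, m) {
    // Every call is wrapped: std::cout output lands in sys.stdout.
    m.def("noisy_guarded", &noisy, py::call_guard<py::scoped_ostream_redirect>());
    // Alternatively, expose a context manager and let Python opt in:
    m.def("noisy", &noisy);
    py::add_ostream_redirect(m, "ostream_redirect");
}
// Python:
//   with redirect_demo.ostream_redirect(stdout=True, stderr=False):
//       redirect_demo.noisy(3)
```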
|
======================================================================================================================
SOURCE CODE FILE: numpy.h
LINES: 1
SIZE: 84.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\numpy.h
ENCODING: utf-8
```h
/*
pybind11/numpy.h: Basic NumPy support, vectorize() wrapper
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include "detail/common.h"
#include "complex.h"
#include "gil_safe_call_once.h"
#include "pytypes.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <numeric>
#include <sstream>
#include <string>
#include <type_traits>
#include <typeindex>
#include <utility>
#include <vector>
#if defined(PYBIND11_NUMPY_1_ONLY) && !defined(PYBIND11_INTERNAL_NUMPY_1_ONLY_DETECTED)
# error PYBIND11_NUMPY_1_ONLY must be defined before any pybind11 header is included.
#endif
/* This will be true on all flat address space platforms and allows us to reduce the
whole npy_intp / ssize_t / Py_intptr_t business down to just ssize_t for all size
and dimension types (e.g. shape, strides, indexing), instead of inflicting this
upon the library user.
Note that NumPy 2 now uses ssize_t for `npy_intp` to simplify this. */
static_assert(sizeof(::pybind11::ssize_t) == sizeof(Py_intptr_t), "ssize_t != Py_intptr_t");
static_assert(std::is_signed<Py_intptr_t>::value, "Py_intptr_t must be signed");
// We now can reinterpret_cast between py::ssize_t and Py_intptr_t (MSVC + PyPy cares)
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
class dtype; // Forward declaration
class array; // Forward declaration
PYBIND11_NAMESPACE_BEGIN(detail)
template <>
struct handle_type_name<dtype> {
static constexpr auto name = const_name("numpy.dtype");
};
template <>
struct handle_type_name<array> {
static constexpr auto name = const_name("numpy.ndarray");
};
template <typename type, typename SFINAE = void>
struct npy_format_descriptor;
/* NumPy 1 proxy (always includes legacy fields) */
struct PyArrayDescr1_Proxy {
PyObject_HEAD
PyObject *typeobj;
char kind;
char type;
char byteorder;
char flags;
int type_num;
int elsize;
int alignment;
char *subarray;
PyObject *fields;
PyObject *names;
};
#ifndef PYBIND11_NUMPY_1_ONLY
struct PyArrayDescr_Proxy {
PyObject_HEAD
PyObject *typeobj;
char kind;
char type;
char byteorder;
char _former_flags;
int type_num;
/* Additional fields are NumPy version specific. */
};
#else
/* NumPy 1.x only, we can expose all fields */
using PyArrayDescr_Proxy = PyArrayDescr1_Proxy;
#endif
/* NumPy 2 proxy, including legacy fields */
struct PyArrayDescr2_Proxy {
PyObject_HEAD
PyObject *typeobj;
char kind;
char type;
char byteorder;
char _former_flags;
int type_num;
std::uint64_t flags;
ssize_t elsize;
ssize_t alignment;
PyObject *metadata;
Py_hash_t hash;
void *reserved_null[2];
/* The following fields only exist if 0 <= type_num < 2056 */
char *subarray;
PyObject *fields;
PyObject *names;
};
struct PyArray_Proxy {
PyObject_HEAD
char *data;
int nd;
ssize_t *dimensions;
ssize_t *strides;
PyObject *base;
PyObject *descr;
int flags;
};
struct PyVoidScalarObject_Proxy {
PyObject_VAR_HEAD char *obval;
PyArrayDescr_Proxy *descr;
int flags;
PyObject *base;
};
struct numpy_type_info {
PyObject *dtype_ptr;
std::string format_str;
};
struct numpy_internals {
std::unordered_map<std::type_index, numpy_type_info> registered_dtypes;
numpy_type_info *get_type_info(const std::type_info &tinfo, bool throw_if_missing = true) {
auto it = registered_dtypes.find(std::type_index(tinfo));
if (it != registered_dtypes.end()) {
return &(it->second);
}
if (throw_if_missing) {
pybind11_fail(std::string("NumPy type info missing for ") + tinfo.name());
}
return nullptr;
}
template <typename T>
numpy_type_info *get_type_info(bool throw_if_missing = true) {
return get_type_info(typeid(typename std::remove_cv<T>::type), throw_if_missing);
}
};
PYBIND11_NOINLINE void load_numpy_internals(numpy_internals *&ptr) {
ptr = &get_or_create_shared_data<numpy_internals>("_numpy_internals");
}
inline numpy_internals &get_numpy_internals() {
static numpy_internals *ptr = nullptr;
if (!ptr) {
load_numpy_internals(ptr);
}
return *ptr;
}
PYBIND11_NOINLINE module_ import_numpy_core_submodule(const char *submodule_name) {
module_ numpy = module_::import("numpy");
str version_string = numpy.attr("__version__");
module_ numpy_lib = module_::import("numpy.lib");
object numpy_version = numpy_lib.attr("NumpyVersion")(version_string);
int major_version = numpy_version.attr("major").cast<int>();
#ifdef PYBIND11_NUMPY_1_ONLY
if (major_version >= 2) {
throw std::runtime_error(
"This extension was built with PYBIND11_NUMPY_1_ONLY defined, "
"but NumPy 2 is used in this process. For NumPy2 compatibility, "
"this extension needs to be rebuilt without the PYBIND11_NUMPY_1_ONLY define.");
}
#endif
/* `numpy.core` was renamed to `numpy._core` in NumPy 2.0 as it officially
became a private module. */
std::string numpy_core_path = major_version >= 2 ? "numpy._core" : "numpy.core";
return module_::import((numpy_core_path + "." + submodule_name).c_str());
}
template <typename T>
struct same_size {
template <typename U>
using as = bool_constant<sizeof(T) == sizeof(U)>;
};
template <typename Concrete>
constexpr int platform_lookup() {
return -1;
}
// Look up a type according to its size, and return a value corresponding to the NumPy typenum.
template <typename Concrete, typename T, typename... Ts, typename... Ints>
constexpr int platform_lookup(int I, Ints... Is) {
return sizeof(Concrete) == sizeof(T) ? I : platform_lookup<Concrete, Ts...>(Is...);
}
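// Worked example of the lookup above (editorial note): NPY_INT32_ in the enum
// below expands to
// platform_lookup<std::int32_t, long, int, short>(NPY_LONG_, NPY_INT_, NPY_SHORT_).
// On an LP64 platform (e.g. Linux x86-64), sizeof(long) == 8 != 4, so the
// recursion continues; sizeof(int) == 4 == sizeof(std::int32_t), so the result
// is NPY_INT_. On LLP64 Windows, sizeof(long) == 4 and it resolves to
// NPY_LONG_ instead, matching the order in which `npy_common.h` assigns the aliases.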
struct npy_api {
enum constants {
NPY_ARRAY_C_CONTIGUOUS_ = 0x0001,
NPY_ARRAY_F_CONTIGUOUS_ = 0x0002,
NPY_ARRAY_OWNDATA_ = 0x0004,
NPY_ARRAY_FORCECAST_ = 0x0010,
NPY_ARRAY_ENSUREARRAY_ = 0x0040,
NPY_ARRAY_ALIGNED_ = 0x0100,
NPY_ARRAY_WRITEABLE_ = 0x0400,
NPY_BOOL_ = 0,
NPY_BYTE_,
NPY_UBYTE_,
NPY_SHORT_,
NPY_USHORT_,
NPY_INT_,
NPY_UINT_,
NPY_LONG_,
NPY_ULONG_,
NPY_LONGLONG_,
NPY_ULONGLONG_,
NPY_FLOAT_,
NPY_DOUBLE_,
NPY_LONGDOUBLE_,
NPY_CFLOAT_,
NPY_CDOUBLE_,
NPY_CLONGDOUBLE_,
NPY_OBJECT_ = 17,
NPY_STRING_,
NPY_UNICODE_,
NPY_VOID_,
// Platform-dependent normalization
NPY_INT8_ = NPY_BYTE_,
NPY_UINT8_ = NPY_UBYTE_,
NPY_INT16_ = NPY_SHORT_,
NPY_UINT16_ = NPY_USHORT_,
// `npy_common.h` defines the integer aliases. In order, it checks:
// NPY_BITSOF_LONG, NPY_BITSOF_LONGLONG, NPY_BITSOF_INT, NPY_BITSOF_SHORT, NPY_BITSOF_CHAR
// and assigns the alias to the first matching size, so we should check in this order.
NPY_INT32_
= platform_lookup<std::int32_t, long, int, short>(NPY_LONG_, NPY_INT_, NPY_SHORT_),
NPY_UINT32_ = platform_lookup<std::uint32_t, unsigned long, unsigned int, unsigned short>(
NPY_ULONG_, NPY_UINT_, NPY_USHORT_),
NPY_INT64_
= platform_lookup<std::int64_t, long, long long, int>(NPY_LONG_, NPY_LONGLONG_, NPY_INT_),
NPY_UINT64_
= platform_lookup<std::uint64_t, unsigned long, unsigned long long, unsigned int>(
NPY_ULONG_, NPY_ULONGLONG_, NPY_UINT_),
};
unsigned int PyArray_RUNTIME_VERSION_;
struct PyArray_Dims {
Py_intptr_t *ptr;
int len;
};
static npy_api &get() {
PYBIND11_CONSTINIT static gil_safe_call_once_and_store<npy_api> storage;
return storage.call_once_and_store_result(lookup).get_stored();
}
bool PyArray_Check_(PyObject *obj) const {
return PyObject_TypeCheck(obj, PyArray_Type_) != 0;
}
bool PyArrayDescr_Check_(PyObject *obj) const {
return PyObject_TypeCheck(obj, PyArrayDescr_Type_) != 0;
}
unsigned int (*PyArray_GetNDArrayCFeatureVersion_)();
PyObject *(*PyArray_DescrFromType_)(int);
PyObject *(*PyArray_NewFromDescr_)(PyTypeObject *,
PyObject *,
int,
Py_intptr_t const *,
Py_intptr_t const *,
void *,
int,
PyObject *);
// Unused. Not removed because that would affect the ABI of the class.
PyObject *(*PyArray_DescrNewFromType_)(int);
int (*PyArray_CopyInto_)(PyObject *, PyObject *);
PyObject *(*PyArray_NewCopy_)(PyObject *, int);
PyTypeObject *PyArray_Type_;
PyTypeObject *PyVoidArrType_Type_;
PyTypeObject *PyArrayDescr_Type_;
PyObject *(*PyArray_DescrFromScalar_)(PyObject *);
PyObject *(*PyArray_FromAny_)(PyObject *, PyObject *, int, int, int, PyObject *);
int (*PyArray_DescrConverter_)(PyObject *, PyObject **);
bool (*PyArray_EquivTypes_)(PyObject *, PyObject *);
#ifdef PYBIND11_NUMPY_1_ONLY
int (*PyArray_GetArrayParamsFromObject_)(PyObject *,
PyObject *,
unsigned char,
PyObject **,
int *,
Py_intptr_t *,
PyObject **,
PyObject *);
#endif
PyObject *(*PyArray_Squeeze_)(PyObject *);
// Unused. Not removed because that would affect the ABI of the class.
int (*PyArray_SetBaseObject_)(PyObject *, PyObject *);
PyObject *(*PyArray_Resize_)(PyObject *, PyArray_Dims *, int, int);
PyObject *(*PyArray_Newshape_)(PyObject *, PyArray_Dims *, int);
PyObject *(*PyArray_View_)(PyObject *, PyObject *, PyObject *);
private:
enum functions {
API_PyArray_GetNDArrayCFeatureVersion = 211,
API_PyArray_Type = 2,
API_PyArrayDescr_Type = 3,
API_PyVoidArrType_Type = 39,
API_PyArray_DescrFromType = 45,
API_PyArray_DescrFromScalar = 57,
API_PyArray_FromAny = 69,
API_PyArray_Resize = 80,
// CopyInto was slot 82 and 50 was effectively an alias. NumPy 2 removed 82.
API_PyArray_CopyInto = 50,
API_PyArray_NewCopy = 85,
API_PyArray_NewFromDescr = 94,
API_PyArray_DescrNewFromType = 96,
API_PyArray_Newshape = 135,
API_PyArray_Squeeze = 136,
API_PyArray_View = 137,
API_PyArray_DescrConverter = 174,
API_PyArray_EquivTypes = 182,
#ifdef PYBIND11_NUMPY_1_ONLY
API_PyArray_GetArrayParamsFromObject = 278,
#endif
API_PyArray_SetBaseObject = 282
};
static npy_api lookup() {
module_ m = detail::import_numpy_core_submodule("multiarray");
auto c = m.attr("_ARRAY_API");
void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), nullptr);
if (api_ptr == nullptr) {
raise_from(PyExc_SystemError, "FAILURE obtaining numpy _ARRAY_API pointer.");
throw error_already_set();
}
npy_api api;
#define DECL_NPY_API(Func) api.Func##_ = (decltype(api.Func##_)) api_ptr[API_##Func];
DECL_NPY_API(PyArray_GetNDArrayCFeatureVersion);
api.PyArray_RUNTIME_VERSION_ = api.PyArray_GetNDArrayCFeatureVersion_();
if (api.PyArray_RUNTIME_VERSION_ < 0x7) {
pybind11_fail("pybind11 numpy support requires numpy >= 1.7.0");
}
DECL_NPY_API(PyArray_Type);
DECL_NPY_API(PyVoidArrType_Type);
DECL_NPY_API(PyArrayDescr_Type);
DECL_NPY_API(PyArray_DescrFromType);
DECL_NPY_API(PyArray_DescrFromScalar);
DECL_NPY_API(PyArray_FromAny);
DECL_NPY_API(PyArray_Resize);
DECL_NPY_API(PyArray_CopyInto);
DECL_NPY_API(PyArray_NewCopy);
DECL_NPY_API(PyArray_NewFromDescr);
DECL_NPY_API(PyArray_DescrNewFromType);
DECL_NPY_API(PyArray_Newshape);
DECL_NPY_API(PyArray_Squeeze);
DECL_NPY_API(PyArray_View);
DECL_NPY_API(PyArray_DescrConverter);
DECL_NPY_API(PyArray_EquivTypes);
#ifdef PYBIND11_NUMPY_1_ONLY
DECL_NPY_API(PyArray_GetArrayParamsFromObject);
#endif
DECL_NPY_API(PyArray_SetBaseObject);
#undef DECL_NPY_API
return api;
}
};
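// Illustrative note (not part of the upstream header): the `_ARRAY_API` capsule used by
// `lookup()` above is simply a C array of `void *` exported by NumPy, so for instance
// `api_ptr[API_PyArray_Type]` (slot 2) points at `PyArray_Type` and
// `api_ptr[API_PyArray_FromAny]` (slot 69) is the `PyArray_FromAny` function pointer;
// the slot numbers in `enum functions` therefore have to match NumPy's published API
// table exactly.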
inline PyArray_Proxy *array_proxy(void *ptr) { return reinterpret_cast<PyArray_Proxy *>(ptr); }
inline const PyArray_Proxy *array_proxy(const void *ptr) {
return reinterpret_cast<const PyArray_Proxy *>(ptr);
}
inline PyArrayDescr_Proxy *array_descriptor_proxy(PyObject *ptr) {
return reinterpret_cast<PyArrayDescr_Proxy *>(ptr);
}
inline const PyArrayDescr_Proxy *array_descriptor_proxy(const PyObject *ptr) {
return reinterpret_cast<const PyArrayDescr_Proxy *>(ptr);
}
inline const PyArrayDescr1_Proxy *array_descriptor1_proxy(const PyObject *ptr) {
return reinterpret_cast<const PyArrayDescr1_Proxy *>(ptr);
}
inline const PyArrayDescr2_Proxy *array_descriptor2_proxy(const PyObject *ptr) {
return reinterpret_cast<const PyArrayDescr2_Proxy *>(ptr);
}
inline bool check_flags(const void *ptr, int flag) {
return (flag == (array_proxy(ptr)->flags & flag));
}
template <typename T>
struct is_std_array : std::false_type {};
template <typename T, size_t N>
struct is_std_array<std::array<T, N>> : std::true_type {};
template <typename T>
struct is_complex : std::false_type {};
template <typename T>
struct is_complex<std::complex<T>> : std::true_type {};
template <typename T>
struct array_info_scalar {
using type = T;
static constexpr bool is_array = false;
static constexpr bool is_empty = false;
static constexpr auto extents = const_name("");
static void append_extents(list & /* shape */) {}
};
// Computes underlying type and a comma-separated list of extents for array
// types (any mix of std::array and built-in arrays). An array of char is
// treated as scalar because it gets special handling.
template <typename T>
struct array_info : array_info_scalar<T> {};
template <typename T, size_t N>
struct array_info<std::array<T, N>> {
using type = typename array_info<T>::type;
static constexpr bool is_array = true;
static constexpr bool is_empty = (N == 0) || array_info<T>::is_empty;
static constexpr size_t extent = N;
// appends the extents to shape
static void append_extents(list &shape) {
shape.append(N);
array_info<T>::append_extents(shape);
}
static constexpr auto extents = const_name<array_info<T>::is_array>(
::pybind11::detail::concat(const_name<N>(), array_info<T>::extents), const_name<N>());
};
// For numpy we have special handling for arrays of characters, so we don't include
// the size in the array extents.
template <size_t N>
struct array_info<char[N]> : array_info_scalar<char[N]> {};
template <size_t N>
struct array_info<std::array<char, N>> : array_info_scalar<std::array<char, N>> {};
template <typename T, size_t N>
struct array_info<T[N]> : array_info<std::array<T, N>> {};
template <typename T>
using remove_all_extents_t = typename array_info<T>::type;
template <typename T>
using is_pod_struct
= all_of<std::is_standard_layout<T>, // since we're accessing directly in memory
// we need a standard layout type
#if defined(__GLIBCXX__) \
&& (__GLIBCXX__ < 20150422 || __GLIBCXX__ == 20150426 || __GLIBCXX__ == 20150623 \
|| __GLIBCXX__ == 20150626 || __GLIBCXX__ == 20160803)
// libstdc++ < 5 (including versions 4.8.5, 4.9.3 and 4.9.4 which were released after
// 5) don't implement is_trivially_copyable, so approximate it
std::is_trivially_destructible<T>,
satisfies_any_of<T, std::has_trivial_copy_constructor, std::has_trivial_copy_assign>,
#else
std::is_trivially_copyable<T>,
#endif
satisfies_none_of<T,
std::is_reference,
std::is_array,
is_std_array,
std::is_arithmetic,
is_complex,
std::is_enum>>;
// Replacement for std::is_pod (deprecated in C++20)
template <typename T>
using is_pod = all_of<std::is_standard_layout<T>, std::is_trivial<T>>;
template <ssize_t Dim = 0, typename Strides>
ssize_t byte_offset_unsafe(const Strides &) {
return 0;
}
template <ssize_t Dim = 0, typename Strides, typename... Ix>
ssize_t byte_offset_unsafe(const Strides &strides, ssize_t i, Ix... index) {
return i * strides[Dim] + byte_offset_unsafe<Dim + 1>(strides, index...);
}
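// Worked example (illustrative): for a contiguous 3x4 array of double the strides are
// {32, 8} bytes, so byte_offset_unsafe(strides, 1, 2) == 1 * 32 + 2 * 8 == 48, i.e.
// element (1, 2) lives 48 bytes past the start of the buffer. The recursion peels off
// one index per call, advancing `Dim` until no indices remain.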
/**
* Proxy class providing unsafe, unchecked const access to array data. This is constructed through
* the `unchecked<T, N>()` method of `array` or the `unchecked<N>()` method of `array_t<T>`. `Dims`
* will be -1 for dimensions determined at runtime.
*/
template <typename T, ssize_t Dims>
class unchecked_reference {
protected:
static constexpr bool Dynamic = Dims < 0;
const unsigned char *data_;
// Storing the shape & strides in local variables (i.e. these arrays) allows the compiler to
// make large performance gains on big, nested loops, but requires compile-time dimensions
conditional_t<Dynamic, const ssize_t *, std::array<ssize_t, (size_t) Dims>> shape_, strides_;
const ssize_t dims_;
friend class pybind11::array;
// Constructor for compile-time dimensions:
template <bool Dyn = Dynamic>
unchecked_reference(const void *data,
const ssize_t *shape,
const ssize_t *strides,
enable_if_t<!Dyn, ssize_t>)
: data_{reinterpret_cast<const unsigned char *>(data)}, dims_{Dims} {
for (size_t i = 0; i < (size_t) dims_; i++) {
shape_[i] = shape[i];
strides_[i] = strides[i];
}
}
// Constructor for runtime dimensions:
template <bool Dyn = Dynamic>
unchecked_reference(const void *data,
const ssize_t *shape,
const ssize_t *strides,
enable_if_t<Dyn, ssize_t> dims)
: data_{reinterpret_cast<const unsigned char *>(data)}, shape_{shape}, strides_{strides},
dims_{dims} {}
public:
/**
* Unchecked const reference access to data at the given indices. For a compile-time known
* number of dimensions, this requires the correct number of arguments; for run-time
* dimensionality, this is not checked (and so is up to the caller to use safely).
*/
template <typename... Ix>
const T &operator()(Ix... index) const {
static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
"Invalid number of indices for unchecked array reference");
return *reinterpret_cast<const T *>(data_
+ byte_offset_unsafe(strides_, ssize_t(index)...));
}
/**
* Unchecked const reference access to data; this operator only participates if the reference
* is to a 1-dimensional array. When present, this is exactly equivalent to `obj(index)`.
*/
template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
const T &operator[](ssize_t index) const {
return operator()(index);
}
/// Pointer access to the data at the given indices.
template <typename... Ix>
const T *data(Ix... ix) const {
return &operator()(ssize_t(ix)...);
}
/// Returns the item size, i.e. sizeof(T)
constexpr static ssize_t itemsize() { return sizeof(T); }
/// Returns the shape (i.e. size) of dimension `dim`
ssize_t shape(ssize_t dim) const { return shape_[(size_t) dim]; }
/// Returns the number of dimensions of the array
ssize_t ndim() const { return dims_; }
/// Returns the total number of elements in the referenced array, i.e. the product of the
/// shapes
template <bool Dyn = Dynamic>
enable_if_t<!Dyn, ssize_t> size() const {
return std::accumulate(
shape_.begin(), shape_.end(), (ssize_t) 1, std::multiplies<ssize_t>());
}
template <bool Dyn = Dynamic>
enable_if_t<Dyn, ssize_t> size() const {
return std::accumulate(shape_, shape_ + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
}
/// Returns the total number of bytes used by the referenced data. Note that the actual span
/// in memory may be larger if the referenced array has non-contiguous strides (e.g. for a
/// slice).
ssize_t nbytes() const { return size() * itemsize(); }
};
template <typename T, ssize_t Dims>
class unchecked_mutable_reference : public unchecked_reference<T, Dims> {
friend class pybind11::array;
using ConstBase = unchecked_reference<T, Dims>;
using ConstBase::ConstBase;
using ConstBase::Dynamic;
public:
// Bring in const-qualified versions from base class
using ConstBase::operator();
using ConstBase::operator[];
/// Mutable, unchecked access to data at the given indices.
template <typename... Ix>
T &operator()(Ix... index) {
static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
"Invalid number of indices for unchecked array reference");
return const_cast<T &>(ConstBase::operator()(index...));
}
/**
     * Mutable, unchecked access to data at the given index; this operator only participates if the
* reference is to a 1-dimensional array (or has runtime dimensions). When present, this is
* exactly equivalent to `obj(index)`.
*/
template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
T &operator[](ssize_t index) {
return operator()(index);
}
/// Mutable pointer access to the data at the given indices.
template <typename... Ix>
T *mutable_data(Ix... ix) {
return &operator()(ssize_t(ix)...);
}
};
template <typename T, ssize_t Dim>
struct type_caster<unchecked_reference<T, Dim>> {
static_assert(Dim == 0 && Dim > 0 /* always fail */,
"unchecked array proxy object is not castable");
};
template <typename T, ssize_t Dim>
struct type_caster<unchecked_mutable_reference<T, Dim>>
: type_caster<unchecked_reference<T, Dim>> {};
PYBIND11_NAMESPACE_END(detail)
class dtype : public object {
public:
PYBIND11_OBJECT_DEFAULT(dtype, object, detail::npy_api::get().PyArrayDescr_Check_)
explicit dtype(const buffer_info &info) {
dtype descr(_dtype_from_pep3118()(pybind11::str(info.format)));
// If info.itemsize == 0, use the value calculated from the format string
m_ptr = descr.strip_padding(info.itemsize != 0 ? info.itemsize : descr.itemsize())
.release()
.ptr();
}
explicit dtype(const pybind11::str &format) : dtype(from_args(format)) {}
explicit dtype(const std::string &format) : dtype(pybind11::str(format)) {}
explicit dtype(const char *format) : dtype(pybind11::str(format)) {}
dtype(list names, list formats, list offsets, ssize_t itemsize) {
dict args;
args["names"] = std::move(names);
args["formats"] = std::move(formats);
args["offsets"] = std::move(offsets);
args["itemsize"] = pybind11::int_(itemsize);
m_ptr = from_args(args).release().ptr();
}
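    // Usage sketch for the constructor above (illustrative; field names are hypothetical):
    // the C++ equivalent of numpy.dtype({'names': ['x', 'y'], 'formats': ['<f4', '<i4'],
    // 'offsets': [0, 4], 'itemsize': 8}) would be
    //
    //     pybind11::list names, formats, offsets;
    //     names.append(pybind11::str("x"));     names.append(pybind11::str("y"));
    //     formats.append(pybind11::str("<f4")); formats.append(pybind11::str("<i4"));
    //     offsets.append(pybind11::int_(0));    offsets.append(pybind11::int_(4));
    //     pybind11::dtype dt(names, formats, offsets, /*itemsize=*/8);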
/// Return dtype for the given typenum (one of the NPY_TYPES).
/// https://numpy.org/devdocs/reference/c-api/array.html#c.PyArray_DescrFromType
explicit dtype(int typenum)
: object(detail::npy_api::get().PyArray_DescrFromType_(typenum), stolen_t{}) {
if (m_ptr == nullptr) {
throw error_already_set();
}
}
/// This is essentially the same as calling numpy.dtype(args) in Python.
static dtype from_args(const object &args) {
PyObject *ptr = nullptr;
if ((detail::npy_api::get().PyArray_DescrConverter_(args.ptr(), &ptr) == 0) || !ptr) {
throw error_already_set();
}
return reinterpret_steal<dtype>(ptr);
}
/// Return dtype associated with a C++ type.
template <typename T>
static dtype of() {
return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::dtype();
}
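    // For example (illustrative): `dtype::of<float>()` yields the descriptor for
    // numpy.float32 and `dtype::of<std::int64_t>()` the one for numpy.int64, both
    // resolved through the npy_format_descriptor specializations defined later in
    // this header.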
/// Size of the data type in bytes.
#ifdef PYBIND11_NUMPY_1_ONLY
ssize_t itemsize() const { return detail::array_descriptor_proxy(m_ptr)->elsize; }
#else
ssize_t itemsize() const {
if (detail::npy_api::get().PyArray_RUNTIME_VERSION_ < 0x12) {
return detail::array_descriptor1_proxy(m_ptr)->elsize;
}
return detail::array_descriptor2_proxy(m_ptr)->elsize;
}
#endif
/// Returns true for structured data types.
#ifdef PYBIND11_NUMPY_1_ONLY
bool has_fields() const { return detail::array_descriptor_proxy(m_ptr)->names != nullptr; }
#else
bool has_fields() const {
if (detail::npy_api::get().PyArray_RUNTIME_VERSION_ < 0x12) {
return detail::array_descriptor1_proxy(m_ptr)->names != nullptr;
}
const auto *proxy = detail::array_descriptor2_proxy(m_ptr);
if (proxy->type_num < 0 || proxy->type_num >= 2056) {
return false;
}
return proxy->names != nullptr;
}
#endif
/// Single-character code for dtype's kind.
/// For example, floating point types are 'f' and integral types are 'i'.
char kind() const { return detail::array_descriptor_proxy(m_ptr)->kind; }
/// Single-character for dtype's type.
/// For example, ``float`` is 'f', ``double`` 'd', ``int`` 'i', and ``long`` 'l'.
char char_() const {
// Note: The signature, `dtype::char_` follows the naming of NumPy's
// public Python API (i.e., ``dtype.char``), rather than its internal
// C API (``PyArray_Descr::type``).
return detail::array_descriptor_proxy(m_ptr)->type;
}
/// type number of dtype.
int num() const {
// Note: The signature, `dtype::num` follows the naming of NumPy's public
// Python API (i.e., ``dtype.num``), rather than its internal
// C API (``PyArray_Descr::type_num``).
return detail::array_descriptor_proxy(m_ptr)->type_num;
}
/// Single character for byteorder
char byteorder() const { return detail::array_descriptor_proxy(m_ptr)->byteorder; }
/// Alignment of the data type
#ifdef PYBIND11_NUMPY_1_ONLY
int alignment() const { return detail::array_descriptor_proxy(m_ptr)->alignment; }
#else
ssize_t alignment() const {
if (detail::npy_api::get().PyArray_RUNTIME_VERSION_ < 0x12) {
return detail::array_descriptor1_proxy(m_ptr)->alignment;
}
return detail::array_descriptor2_proxy(m_ptr)->alignment;
}
#endif
/// Flags for the array descriptor
#ifdef PYBIND11_NUMPY_1_ONLY
char flags() const { return detail::array_descriptor_proxy(m_ptr)->flags; }
#else
std::uint64_t flags() const {
if (detail::npy_api::get().PyArray_RUNTIME_VERSION_ < 0x12) {
return (unsigned char) detail::array_descriptor1_proxy(m_ptr)->flags;
}
return detail::array_descriptor2_proxy(m_ptr)->flags;
}
#endif
private:
static object &_dtype_from_pep3118() {
PYBIND11_CONSTINIT static gil_safe_call_once_and_store<object> storage;
return storage
.call_once_and_store_result([]() {
return detail::import_numpy_core_submodule("_internal")
.attr("_dtype_from_pep3118");
})
.get_stored();
}
dtype strip_padding(ssize_t itemsize) {
// Recursively strip all void fields with empty names that are generated for
// padding fields (as of NumPy v1.11).
if (!has_fields()) {
return *this;
}
struct field_descr {
pybind11::str name;
object format;
pybind11::int_ offset;
field_descr(pybind11::str &&name, object &&format, pybind11::int_ &&offset)
: name{std::move(name)}, format{std::move(format)}, offset{std::move(offset)} {};
};
auto field_dict = attr("fields").cast<dict>();
std::vector<field_descr> field_descriptors;
field_descriptors.reserve(field_dict.size());
for (auto field : field_dict.attr("items")()) {
auto spec = field.cast<tuple>();
auto name = spec[0].cast<pybind11::str>();
auto spec_fo = spec[1].cast<tuple>();
auto format = spec_fo[0].cast<dtype>();
auto offset = spec_fo[1].cast<pybind11::int_>();
if ((len(name) == 0u) && format.kind() == 'V') {
continue;
}
field_descriptors.emplace_back(
std::move(name), format.strip_padding(format.itemsize()), std::move(offset));
}
std::sort(field_descriptors.begin(),
field_descriptors.end(),
[](const field_descr &a, const field_descr &b) {
return a.offset.cast<int>() < b.offset.cast<int>();
});
list names, formats, offsets;
for (auto &descr : field_descriptors) {
names.append(std::move(descr.name));
formats.append(std::move(descr.format));
offsets.append(std::move(descr.offset));
}
return dtype(std::move(names), std::move(formats), std::move(offsets), itemsize);
}
};
class array : public buffer {
public:
PYBIND11_OBJECT_CVT(array, buffer, detail::npy_api::get().PyArray_Check_, raw_array)
enum {
c_style = detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_,
f_style = detail::npy_api::NPY_ARRAY_F_CONTIGUOUS_,
forcecast = detail::npy_api::NPY_ARRAY_FORCECAST_
};
array() : array(0, static_cast<const double *>(nullptr)) {}
using ShapeContainer = detail::any_container<ssize_t>;
using StridesContainer = detail::any_container<ssize_t>;
// Constructs an array taking shape/strides from arbitrary container types
array(const pybind11::dtype &dt,
ShapeContainer shape,
StridesContainer strides,
const void *ptr = nullptr,
handle base = handle()) {
if (strides->empty()) {
*strides = detail::c_strides(*shape, dt.itemsize());
}
auto ndim = shape->size();
if (ndim != strides->size()) {
pybind11_fail("NumPy: shape ndim doesn't match strides ndim");
}
auto descr = dt;
int flags = 0;
if (base && ptr) {
if (isinstance<array>(base)) {
/* Copy flags from base (except ownership bit) */
flags = reinterpret_borrow<array>(base).flags()
& ~detail::npy_api::NPY_ARRAY_OWNDATA_;
} else {
/* Writable by default, easy to downgrade later on if needed */
flags = detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
}
auto &api = detail::npy_api::get();
auto tmp = reinterpret_steal<object>(api.PyArray_NewFromDescr_(
api.PyArray_Type_,
descr.release().ptr(),
(int) ndim,
// Use reinterpret_cast for PyPy on Windows (remove if fixed, checked on 7.3.1)
reinterpret_cast<Py_intptr_t *>(shape->data()),
reinterpret_cast<Py_intptr_t *>(strides->data()),
const_cast<void *>(ptr),
flags,
nullptr));
if (!tmp) {
throw error_already_set();
}
if (ptr) {
if (base) {
api.PyArray_SetBaseObject_(tmp.ptr(), base.inc_ref().ptr());
} else {
tmp = reinterpret_steal<object>(
api.PyArray_NewCopy_(tmp.ptr(), -1 /* any order */));
}
}
m_ptr = tmp.release().ptr();
}
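    // Note on the constructor above (illustrative): when `ptr` is non-null, supplying a
    // `base` wraps the existing memory without copying (the base object keeps the buffer
    // alive via PyArray_SetBaseObject), whereas omitting `base` makes the array copy the
    // data so that it owns it. A minimal zero-copy sketch, assuming `data` outlives the
    // returned array:
    //
    //     static double data[6] = {1, 2, 3, 4, 5, 6};
    //     pybind11::capsule owner(data, [](void *) { /* no-op: data is static */ });
    //     pybind11::array a(pybind11::dtype::of<double>(),
    //                       {2, 3},        // shape
    //                       {3 * 8, 8},    // strides in bytes (row, column)
    //                       data, owner);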
array(const pybind11::dtype &dt,
ShapeContainer shape,
const void *ptr = nullptr,
handle base = handle())
: array(dt, std::move(shape), {}, ptr, base) {}
template <typename T,
typename
= detail::enable_if_t<std::is_integral<T>::value && !std::is_same<bool, T>::value>>
array(const pybind11::dtype &dt, T count, const void *ptr = nullptr, handle base = handle())
: array(dt, {{count}}, ptr, base) {}
template <typename T>
array(ShapeContainer shape, StridesContainer strides, const T *ptr, handle base = handle())
: array(pybind11::dtype::of<T>(),
std::move(shape),
std::move(strides),
reinterpret_cast<const void *>(ptr),
base) {}
template <typename T>
array(ShapeContainer shape, const T *ptr, handle base = handle())
: array(std::move(shape), {}, ptr, base) {}
template <typename T>
explicit array(ssize_t count, const T *ptr, handle base = handle())
: array({count}, {}, ptr, base) {}
explicit array(const buffer_info &info, handle base = handle())
: array(pybind11::dtype(info), info.shape, info.strides, info.ptr, base) {}
/// Array descriptor (dtype)
pybind11::dtype dtype() const {
return reinterpret_borrow<pybind11::dtype>(detail::array_proxy(m_ptr)->descr);
}
/// Total number of elements
ssize_t size() const {
return std::accumulate(shape(), shape() + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
}
/// Byte size of a single element
ssize_t itemsize() const { return dtype().itemsize(); }
/// Total number of bytes
ssize_t nbytes() const { return size() * itemsize(); }
/// Number of dimensions
ssize_t ndim() const { return detail::array_proxy(m_ptr)->nd; }
/// Base object
object base() const { return reinterpret_borrow<object>(detail::array_proxy(m_ptr)->base); }
/// Dimensions of the array
const ssize_t *shape() const { return detail::array_proxy(m_ptr)->dimensions; }
/// Dimension along a given axis
ssize_t shape(ssize_t dim) const {
if (dim >= ndim()) {
fail_dim_check(dim, "invalid axis");
}
return shape()[dim];
}
/// Strides of the array
const ssize_t *strides() const { return detail::array_proxy(m_ptr)->strides; }
/// Stride along a given axis
ssize_t strides(ssize_t dim) const {
if (dim >= ndim()) {
fail_dim_check(dim, "invalid axis");
}
return strides()[dim];
}
/// Return the NumPy array flags
int flags() const { return detail::array_proxy(m_ptr)->flags; }
/// If set, the array is writeable (otherwise the buffer is read-only)
bool writeable() const {
return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_WRITEABLE_);
}
/// If set, the array owns the data (will be freed when the array is deleted)
bool owndata() const {
return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_OWNDATA_);
}
/// Pointer to the contained data. If index is not provided, points to the
/// beginning of the buffer. May throw if the index would lead to out of bounds access.
template <typename... Ix>
const void *data(Ix... index) const {
return static_cast<const void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
}
/// Mutable pointer to the contained data. If index is not provided, points to the
/// beginning of the buffer. May throw if the index would lead to out of bounds access.
/// May throw if the array is not writeable.
template <typename... Ix>
void *mutable_data(Ix... index) {
check_writeable();
return static_cast<void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
}
/// Byte offset from beginning of the array to a given index (full or partial).
/// May throw if the index would lead to out of bounds access.
template <typename... Ix>
ssize_t offset_at(Ix... index) const {
if ((ssize_t) sizeof...(index) > ndim()) {
fail_dim_check(sizeof...(index), "too many indices for an array");
}
return byte_offset(ssize_t(index)...);
}
ssize_t offset_at() const { return 0; }
/// Item count from beginning of the array to a given index (full or partial).
/// May throw if the index would lead to out of bounds access.
template <typename... Ix>
ssize_t index_at(Ix... index) const {
return offset_at(index...) / itemsize();
}
/**
* Returns a proxy object that provides access to the array's data without bounds or
* dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
* care: the array must not be destroyed or reshaped for the duration of the returned object,
* and the caller must take care not to access invalid dimensions or dimension indices.
*/
template <typename T, ssize_t Dims = -1>
detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
if (Dims >= 0 && ndim() != Dims) {
throw std::domain_error("array has incorrect number of dimensions: "
+ std::to_string(ndim()) + "; expected "
+ std::to_string(Dims));
}
return detail::unchecked_mutable_reference<T, Dims>(
mutable_data(), shape(), strides(), ndim());
}
/**
* Returns a proxy object that provides const access to the array's data without bounds or
* dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the
     * underlying array have the `writeable` flag. Use with care: the array must not be destroyed
* or reshaped for the duration of the returned object, and the caller must take care not to
* access invalid dimensions or dimension indices.
*/
template <typename T, ssize_t Dims = -1>
detail::unchecked_reference<T, Dims> unchecked() const & {
if (Dims >= 0 && ndim() != Dims) {
throw std::domain_error("array has incorrect number of dimensions: "
+ std::to_string(ndim()) + "; expected "
+ std::to_string(Dims));
}
return detail::unchecked_reference<T, Dims>(data(), shape(), strides(), ndim());
}
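    // Usage sketch for the accessors above (illustrative): fast element access on a 2-D
    // array of doubles without per-element bounds checks. Note that `unchecked` verifies
    // the dimensionality but not the dtype; the caller must know the elements are double.
    //
    //     double sum_all(const pybind11::array &a) {
    //         auto r = a.unchecked<double, 2>();   // throws unless a.ndim() == 2
    //         double s = 0;
    //         for (pybind11::ssize_t i = 0; i < r.shape(0); i++)
    //             for (pybind11::ssize_t j = 0; j < r.shape(1); j++)
    //                 s += r(i, j);                // unchecked read
    //         return s;
    //     }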
/// Return a new view with all of the dimensions of length 1 removed
array squeeze() {
auto &api = detail::npy_api::get();
return reinterpret_steal<array>(api.PyArray_Squeeze_(m_ptr));
}
    /// Resize array to given shape.
    /// If refcheck is true and more than one reference exists to this array, then resize will
    /// succeed only if it makes a reshape, i.e. the original size doesn't change.
void resize(ShapeContainer new_shape, bool refcheck = true) {
detail::npy_api::PyArray_Dims d
= {// Use reinterpret_cast for PyPy on Windows (remove if fixed, checked on 7.3.1)
reinterpret_cast<Py_intptr_t *>(new_shape->data()),
int(new_shape->size())};
        // try to resize; set the ordering param to -1 because it's not used anyway
auto new_array = reinterpret_steal<object>(
detail::npy_api::get().PyArray_Resize_(m_ptr, &d, int(refcheck), -1));
if (!new_array) {
throw error_already_set();
}
if (isinstance<array>(new_array)) {
*this = std::move(new_array);
}
}
/// Optional `order` parameter omitted, to be added as needed.
array reshape(ShapeContainer new_shape) {
detail::npy_api::PyArray_Dims d
= {reinterpret_cast<Py_intptr_t *>(new_shape->data()), int(new_shape->size())};
auto new_array
= reinterpret_steal<array>(detail::npy_api::get().PyArray_Newshape_(m_ptr, &d, 0));
if (!new_array) {
throw error_already_set();
}
return new_array;
}
/// Create a view of an array in a different data type.
/// This function may fundamentally reinterpret the data in the array.
/// It is the responsibility of the caller to ensure that this is safe.
/// Only supports the `dtype` argument, the `type` argument is omitted,
/// to be added as needed.
array view(const std::string &dtype) {
auto &api = detail::npy_api::get();
auto new_view = reinterpret_steal<array>(api.PyArray_View_(
m_ptr, dtype::from_args(pybind11::str(dtype)).release().ptr(), nullptr));
if (!new_view) {
throw error_already_set();
}
return new_view;
}
/// Ensure that the argument is a NumPy array
/// In case of an error, nullptr is returned and the Python error is cleared.
static array ensure(handle h, int ExtraFlags = 0) {
auto result = reinterpret_steal<array>(raw_array(h.ptr(), ExtraFlags));
if (!result) {
PyErr_Clear();
}
return result;
}
protected:
template <typename, typename>
friend struct detail::npy_format_descriptor;
void fail_dim_check(ssize_t dim, const std::string &msg) const {
throw index_error(msg + ": " + std::to_string(dim) + " (ndim = " + std::to_string(ndim())
+ ')');
}
template <typename... Ix>
ssize_t byte_offset(Ix... index) const {
check_dimensions(index...);
return detail::byte_offset_unsafe(strides(), ssize_t(index)...);
}
void check_writeable() const {
if (!writeable()) {
throw std::domain_error("array is not writeable");
}
}
template <typename... Ix>
void check_dimensions(Ix... index) const {
check_dimensions_impl(ssize_t(0), shape(), ssize_t(index)...);
}
void check_dimensions_impl(ssize_t, const ssize_t *) const {}
template <typename... Ix>
void check_dimensions_impl(ssize_t axis, const ssize_t *shape, ssize_t i, Ix... index) const {
if (i >= *shape) {
throw index_error(std::string("index ") + std::to_string(i)
+ " is out of bounds for axis " + std::to_string(axis)
+ " with size " + std::to_string(*shape));
}
check_dimensions_impl(axis + 1, shape + 1, index...);
}
/// Create array from any object -- always returns a new reference
static PyObject *raw_array(PyObject *ptr, int ExtraFlags = 0) {
if (ptr == nullptr) {
set_error(PyExc_ValueError, "cannot create a pybind11::array from a nullptr");
return nullptr;
}
return detail::npy_api::get().PyArray_FromAny_(
ptr, nullptr, 0, 0, detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);
}
};
template <typename T, int ExtraFlags = array::forcecast>
class array_t : public array {
private:
struct private_ctor {};
// Delegating constructor needed when both moving and accessing in the same constructor
array_t(private_ctor,
ShapeContainer &&shape,
StridesContainer &&strides,
const T *ptr,
handle base)
: array(std::move(shape), std::move(strides), ptr, base) {}
public:
static_assert(!detail::array_info<T>::is_array, "Array types cannot be used with array_t");
using value_type = T;
array_t() : array(0, static_cast<const T *>(nullptr)) {}
array_t(handle h, borrowed_t) : array(h, borrowed_t{}) {}
array_t(handle h, stolen_t) : array(h, stolen_t{}) {}
PYBIND11_DEPRECATED("Use array_t<T>::ensure() instead")
array_t(handle h, bool is_borrowed) : array(raw_array_t(h.ptr()), stolen_t{}) {
if (!m_ptr) {
PyErr_Clear();
}
if (!is_borrowed) {
Py_XDECREF(h.ptr());
}
}
// NOLINTNEXTLINE(google-explicit-constructor)
array_t(const object &o) : array(raw_array_t(o.ptr()), stolen_t{}) {
if (!m_ptr) {
throw error_already_set();
}
}
explicit array_t(const buffer_info &info, handle base = handle()) : array(info, base) {}
array_t(ShapeContainer shape,
StridesContainer strides,
const T *ptr = nullptr,
handle base = handle())
: array(std::move(shape), std::move(strides), ptr, base) {}
explicit array_t(ShapeContainer shape, const T *ptr = nullptr, handle base = handle())
: array_t(private_ctor{},
std::move(shape),
(ExtraFlags & f_style) != 0 ? detail::f_strides(*shape, itemsize())
: detail::c_strides(*shape, itemsize()),
ptr,
base) {}
explicit array_t(ssize_t count, const T *ptr = nullptr, handle base = handle())
: array({count}, {}, ptr, base) {}
constexpr ssize_t itemsize() const { return sizeof(T); }
template <typename... Ix>
ssize_t index_at(Ix... index) const {
return offset_at(index...) / itemsize();
}
template <typename... Ix>
const T *data(Ix... index) const {
return static_cast<const T *>(array::data(index...));
}
template <typename... Ix>
T *mutable_data(Ix... index) {
return static_cast<T *>(array::mutable_data(index...));
}
// Reference to element at a given index
template <typename... Ix>
const T &at(Ix... index) const {
if ((ssize_t) sizeof...(index) != ndim()) {
fail_dim_check(sizeof...(index), "index dimension mismatch");
}
return *(static_cast<const T *>(array::data())
+ byte_offset(ssize_t(index)...) / itemsize());
}
// Mutable reference to element at a given index
template <typename... Ix>
T &mutable_at(Ix... index) {
if ((ssize_t) sizeof...(index) != ndim()) {
fail_dim_check(sizeof...(index), "index dimension mismatch");
}
return *(static_cast<T *>(array::mutable_data())
+ byte_offset(ssize_t(index)...) / itemsize());
}
/**
* Returns a proxy object that provides access to the array's data without bounds or
* dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
* care: the array must not be destroyed or reshaped for the duration of the returned object,
* and the caller must take care not to access invalid dimensions or dimension indices.
*/
template <ssize_t Dims = -1>
detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
return array::mutable_unchecked<T, Dims>();
}
/**
* Returns a proxy object that provides const access to the array's data without bounds or
* dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the
     * underlying array have the `writeable` flag. Use with care: the array must not be destroyed
* or reshaped for the duration of the returned object, and the caller must take care not to
* access invalid dimensions or dimension indices.
*/
template <ssize_t Dims = -1>
detail::unchecked_reference<T, Dims> unchecked() const & {
return array::unchecked<T, Dims>();
}
/// Ensure that the argument is a NumPy array of the correct dtype (and if not, try to convert
/// it). In case of an error, nullptr is returned and the Python error is cleared.
static array_t ensure(handle h) {
auto result = reinterpret_steal<array_t>(raw_array_t(h.ptr()));
if (!result) {
PyErr_Clear();
}
return result;
}
static bool check_(handle h) {
const auto &api = detail::npy_api::get();
return api.PyArray_Check_(h.ptr())
&& api.PyArray_EquivTypes_(detail::array_proxy(h.ptr())->descr,
dtype::of<T>().ptr())
&& detail::check_flags(h.ptr(), ExtraFlags & (array::c_style | array::f_style));
}
protected:
/// Create array from any object -- always returns a new reference
static PyObject *raw_array_t(PyObject *ptr) {
if (ptr == nullptr) {
set_error(PyExc_ValueError, "cannot create a pybind11::array_t from a nullptr");
return nullptr;
}
return detail::npy_api::get().PyArray_FromAny_(ptr,
dtype::of<T>().release().ptr(),
0,
0,
detail::npy_api::NPY_ARRAY_ENSUREARRAY_
| ExtraFlags,
nullptr);
}
};
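// Usage sketch (illustrative): the ExtraFlags template parameter is what lets a binding
// accept any array-like argument while guaranteeing a dense, row-major double buffer,
// converting or copying only when necessary:
//
//     void process(pybind11::array_t<double, pybind11::array::c_style
//                                                | pybind11::array::forcecast> arr) {
//         const double *p = arr.data();    // contiguous and C-ordered by construction
//         for (pybind11::ssize_t i = 0; i < arr.size(); i++)
//             consume(p[i]);               // `consume` is a hypothetical callback
//     }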
template <typename T>
struct format_descriptor<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
static std::string format() {
return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::format();
}
};
template <size_t N>
struct format_descriptor<char[N]> {
static std::string format() { return std::to_string(N) + 's'; }
};
template <size_t N>
struct format_descriptor<std::array<char, N>> {
static std::string format() { return std::to_string(N) + 's'; }
};
template <typename T>
struct format_descriptor<T, detail::enable_if_t<std::is_enum<T>::value>> {
static std::string format() {
return format_descriptor<
typename std::remove_cv<typename std::underlying_type<T>::type>::type>::format();
}
};
template <typename T>
struct format_descriptor<T, detail::enable_if_t<detail::array_info<T>::is_array>> {
static std::string format() {
using namespace detail;
static constexpr auto extents = const_name("(") + array_info<T>::extents + const_name(")");
return extents.text + format_descriptor<remove_all_extents_t<T>>::format();
}
};
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename T, int ExtraFlags>
struct pyobject_caster<array_t<T, ExtraFlags>> {
using type = array_t<T, ExtraFlags>;
bool load(handle src, bool convert) {
if (!convert && !type::check_(src)) {
return false;
}
value = type::ensure(src);
return static_cast<bool>(value);
}
static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {
return src.inc_ref();
}
PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);
};
template <typename T>
struct compare_buffer_info<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
static bool compare(const buffer_info &b) {
return npy_api::get().PyArray_EquivTypes_(dtype::of<T>().ptr(), dtype(b).ptr());
}
};
template <typename T, typename = void>
struct npy_format_descriptor_name;
template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<std::is_integral<T>::value>> {
static constexpr auto name = const_name<std::is_same<T, bool>::value>(
const_name("bool"),
const_name<std::is_signed<T>::value>("numpy.int", "numpy.uint")
+ const_name<sizeof(T) * 8>());
};
template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<std::is_floating_point<T>::value>> {
static constexpr auto name = const_name < std::is_same<T, float>::value
|| std::is_same<T, const float>::value
|| std::is_same<T, double>::value
|| std::is_same<T, const double>::value
> (const_name("numpy.float") + const_name<sizeof(T) * 8>(),
const_name("numpy.longdouble"));
};
template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<is_complex<T>::value>> {
static constexpr auto name = const_name < std::is_same<typename T::value_type, float>::value
|| std::is_same<typename T::value_type, const float>::value
|| std::is_same<typename T::value_type, double>::value
|| std::is_same<typename T::value_type, const double>::value
> (const_name("numpy.complex")
+ const_name<sizeof(typename T::value_type) * 16>(),
const_name("numpy.longcomplex"));
};
template <typename T>
struct npy_format_descriptor<
T,
enable_if_t<satisfies_any_of<T, std::is_arithmetic, is_complex>::value>>
: npy_format_descriptor_name<T> {
private:
// NB: the order here must match the one in common.h
constexpr static const int values[15] = {npy_api::NPY_BOOL_,
npy_api::NPY_BYTE_,
npy_api::NPY_UBYTE_,
npy_api::NPY_INT16_,
npy_api::NPY_UINT16_,
npy_api::NPY_INT32_,
npy_api::NPY_UINT32_,
npy_api::NPY_INT64_,
npy_api::NPY_UINT64_,
npy_api::NPY_FLOAT_,
npy_api::NPY_DOUBLE_,
npy_api::NPY_LONGDOUBLE_,
npy_api::NPY_CFLOAT_,
npy_api::NPY_CDOUBLE_,
npy_api::NPY_CLONGDOUBLE_};
public:
static constexpr int value = values[detail::is_fmt_numeric<T>::index];
static pybind11::dtype dtype() { return pybind11::dtype(/*typenum*/ value); }
};
template <typename T>
struct npy_format_descriptor<T, enable_if_t<is_same_ignoring_cvref<T, PyObject *>::value>> {
static constexpr auto name = const_name("object");
static constexpr int value = npy_api::NPY_OBJECT_;
static pybind11::dtype dtype() { return pybind11::dtype(/*typenum*/ value); }
};
#define PYBIND11_DECL_CHAR_FMT \
static constexpr auto name = const_name("S") + const_name<N>(); \
static pybind11::dtype dtype() { \
return pybind11::dtype(std::string("S") + std::to_string(N)); \
}
template <size_t N>
struct npy_format_descriptor<char[N]> {
PYBIND11_DECL_CHAR_FMT
};
template <size_t N>
struct npy_format_descriptor<std::array<char, N>> {
PYBIND11_DECL_CHAR_FMT
};
#undef PYBIND11_DECL_CHAR_FMT
template <typename T>
struct npy_format_descriptor<T, enable_if_t<array_info<T>::is_array>> {
private:
using base_descr = npy_format_descriptor<typename array_info<T>::type>;
public:
static_assert(!array_info<T>::is_empty, "Zero-sized arrays are not supported");
static constexpr auto name
= const_name("(") + array_info<T>::extents + const_name(")") + base_descr::name;
static pybind11::dtype dtype() {
list shape;
array_info<T>::append_extents(shape);
return pybind11::dtype::from_args(
pybind11::make_tuple(base_descr::dtype(), std::move(shape)));
}
};
template <typename T>
struct npy_format_descriptor<T, enable_if_t<std::is_enum<T>::value>> {
private:
using base_descr = npy_format_descriptor<typename std::underlying_type<T>::type>;
public:
static constexpr auto name = base_descr::name;
static pybind11::dtype dtype() { return base_descr::dtype(); }
};
struct field_descriptor {
const char *name;
ssize_t offset;
ssize_t size;
std::string format;
dtype descr;
};
PYBIND11_NOINLINE void register_structured_dtype(any_container<field_descriptor> fields,
const std::type_info &tinfo,
ssize_t itemsize,
bool (*direct_converter)(PyObject *, void *&)) {
auto &numpy_internals = get_numpy_internals();
if (numpy_internals.get_type_info(tinfo, false)) {
pybind11_fail("NumPy: dtype is already registered");
}
// Use ordered fields because order matters as of NumPy 1.14:
// https://docs.scipy.org/doc/numpy/release.html#multiple-field-indexing-assignment-of-structured-arrays
std::vector<field_descriptor> ordered_fields(std::move(fields));
std::sort(
ordered_fields.begin(),
ordered_fields.end(),
[](const field_descriptor &a, const field_descriptor &b) { return a.offset < b.offset; });
list names, formats, offsets;
for (auto &field : ordered_fields) {
if (!field.descr) {
pybind11_fail(std::string("NumPy: unsupported field dtype: `") + field.name + "` @ "
+ tinfo.name());
}
names.append(pybind11::str(field.name));
formats.append(field.descr);
offsets.append(pybind11::int_(field.offset));
}
auto *dtype_ptr
= pybind11::dtype(std::move(names), std::move(formats), std::move(offsets), itemsize)
.release()
.ptr();
// There is an existing bug in NumPy (as of v1.11): trailing bytes are
// not encoded explicitly into the format string. This will supposedly
// get fixed in v1.12; for further details, see these:
// - https://github.com/numpy/numpy/issues/7797
// - https://github.com/numpy/numpy/pull/7798
// Because of this, we won't use numpy's logic to generate buffer format
// strings and will just do it ourselves.
ssize_t offset = 0;
std::ostringstream oss;
// mark the structure as unaligned with '^', because numpy and C++ don't
// always agree about alignment (particularly for complex), and we're
// explicitly listing all our padding. This depends on none of the fields
// overriding the endianness. Putting the ^ in front of individual fields
// isn't guaranteed to work due to https://github.com/numpy/numpy/issues/9049
oss << "^T{";
for (auto &field : ordered_fields) {
if (field.offset > offset) {
oss << (field.offset - offset) << 'x';
}
oss << field.format << ':' << field.name << ':';
offset = field.offset + field.size;
}
if (itemsize > offset) {
oss << (itemsize - offset) << 'x';
}
oss << '}';
auto format_str = oss.str();
// Smoke test: verify that NumPy properly parses our buffer format string
auto &api = npy_api::get();
auto arr = array(buffer_info(nullptr, itemsize, format_str, 1));
if (!api.PyArray_EquivTypes_(dtype_ptr, arr.dtype().ptr())) {
pybind11_fail("NumPy: invalid buffer descriptor!");
}
auto tindex = std::type_index(tinfo);
numpy_internals.registered_dtypes[tindex] = {dtype_ptr, std::move(format_str)};
with_internals([tindex, &direct_converter](internals &internals) {
internals.direct_conversions[tindex].push_back(direct_converter);
});
}
template <typename T, typename SFINAE>
struct npy_format_descriptor {
static_assert(is_pod_struct<T>::value,
"Attempt to use a non-POD or unimplemented POD type as a numpy dtype");
static constexpr auto name = make_caster<T>::name;
static pybind11::dtype dtype() { return reinterpret_borrow<pybind11::dtype>(dtype_ptr()); }
static std::string format() {
static auto format_str = get_numpy_internals().get_type_info<T>(true)->format_str;
return format_str;
}
static void register_dtype(any_container<field_descriptor> fields) {
register_structured_dtype(std::move(fields),
typeid(typename std::remove_cv<T>::type),
sizeof(T),
&direct_converter);
}
private:
static PyObject *dtype_ptr() {
static PyObject *ptr = get_numpy_internals().get_type_info<T>(true)->dtype_ptr;
return ptr;
}
static bool direct_converter(PyObject *obj, void *&value) {
auto &api = npy_api::get();
if (!PyObject_TypeCheck(obj, api.PyVoidArrType_Type_)) {
return false;
}
if (auto descr = reinterpret_steal<object>(api.PyArray_DescrFromScalar_(obj))) {
if (api.PyArray_EquivTypes_(dtype_ptr(), descr.ptr())) {
value = ((PyVoidScalarObject_Proxy *) obj)->obval;
return true;
}
}
return false;
}
};
#ifdef __CLION_IDE__ // replace heavy macro with dummy code for the IDE (doesn't affect code)
# define PYBIND11_NUMPY_DTYPE(Type, ...) ((void) 0)
# define PYBIND11_NUMPY_DTYPE_EX(Type, ...) ((void) 0)
#else
# define PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, Name) \
::pybind11::detail::field_descriptor { \
Name, offsetof(T, Field), sizeof(decltype(std::declval<T>().Field)), \
::pybind11::format_descriptor<decltype(std::declval<T>().Field)>::format(), \
::pybind11::detail::npy_format_descriptor< \
decltype(std::declval<T>().Field)>::dtype() \
}
// Extract name, offset and format descriptor for a struct field
# define PYBIND11_FIELD_DESCRIPTOR(T, Field) PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, #Field)
// The main idea of this macro is borrowed from https://github.com/swansontec/map-macro
// (C) William Swanson, Paul Fultz
# define PYBIND11_EVAL0(...) __VA_ARGS__
# define PYBIND11_EVAL1(...) PYBIND11_EVAL0(PYBIND11_EVAL0(PYBIND11_EVAL0(__VA_ARGS__)))
# define PYBIND11_EVAL2(...) PYBIND11_EVAL1(PYBIND11_EVAL1(PYBIND11_EVAL1(__VA_ARGS__)))
# define PYBIND11_EVAL3(...) PYBIND11_EVAL2(PYBIND11_EVAL2(PYBIND11_EVAL2(__VA_ARGS__)))
# define PYBIND11_EVAL4(...) PYBIND11_EVAL3(PYBIND11_EVAL3(PYBIND11_EVAL3(__VA_ARGS__)))
# define PYBIND11_EVAL(...) PYBIND11_EVAL4(PYBIND11_EVAL4(PYBIND11_EVAL4(__VA_ARGS__)))
# define PYBIND11_MAP_END(...)
# define PYBIND11_MAP_OUT
# define PYBIND11_MAP_COMMA ,
# define PYBIND11_MAP_GET_END() 0, PYBIND11_MAP_END
# define PYBIND11_MAP_NEXT0(test, next, ...) next PYBIND11_MAP_OUT
# define PYBIND11_MAP_NEXT1(test, next) PYBIND11_MAP_NEXT0(test, next, 0)
# define PYBIND11_MAP_NEXT(test, next) PYBIND11_MAP_NEXT1(PYBIND11_MAP_GET_END test, next)
# if defined(_MSC_VER) \
&& !defined(__clang__) // MSVC is not as eager to expand macros, hence this workaround
# define PYBIND11_MAP_LIST_NEXT1(test, next) \
PYBIND11_EVAL0(PYBIND11_MAP_NEXT0(test, PYBIND11_MAP_COMMA next, 0))
# else
# define PYBIND11_MAP_LIST_NEXT1(test, next) \
PYBIND11_MAP_NEXT0(test, PYBIND11_MAP_COMMA next, 0)
# endif
# define PYBIND11_MAP_LIST_NEXT(test, next) \
PYBIND11_MAP_LIST_NEXT1(PYBIND11_MAP_GET_END test, next)
# define PYBIND11_MAP_LIST0(f, t, x, peek, ...) \
f(t, x) PYBIND11_MAP_LIST_NEXT(peek, PYBIND11_MAP_LIST1)(f, t, peek, __VA_ARGS__)
# define PYBIND11_MAP_LIST1(f, t, x, peek, ...) \
f(t, x) PYBIND11_MAP_LIST_NEXT(peek, PYBIND11_MAP_LIST0)(f, t, peek, __VA_ARGS__)
// PYBIND11_MAP_LIST(f, t, a1, a2, ...) expands to f(t, a1), f(t, a2), ...
# define PYBIND11_MAP_LIST(f, t, ...) \
PYBIND11_EVAL(PYBIND11_MAP_LIST1(f, t, __VA_ARGS__, (), 0))
# define PYBIND11_NUMPY_DTYPE(Type, ...) \
::pybind11::detail::npy_format_descriptor<Type>::register_dtype( \
::std::vector<::pybind11::detail::field_descriptor>{ \
PYBIND11_MAP_LIST(PYBIND11_FIELD_DESCRIPTOR, Type, __VA_ARGS__)})
# if defined(_MSC_VER) && !defined(__clang__)
# define PYBIND11_MAP2_LIST_NEXT1(test, next) \
PYBIND11_EVAL0(PYBIND11_MAP_NEXT0(test, PYBIND11_MAP_COMMA next, 0))
# else
# define PYBIND11_MAP2_LIST_NEXT1(test, next) \
PYBIND11_MAP_NEXT0(test, PYBIND11_MAP_COMMA next, 0)
# endif
# define PYBIND11_MAP2_LIST_NEXT(test, next) \
PYBIND11_MAP2_LIST_NEXT1(PYBIND11_MAP_GET_END test, next)
# define PYBIND11_MAP2_LIST0(f, t, x1, x2, peek, ...) \
f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT(peek, PYBIND11_MAP2_LIST1)(f, t, peek, __VA_ARGS__)
# define PYBIND11_MAP2_LIST1(f, t, x1, x2, peek, ...) \
f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT(peek, PYBIND11_MAP2_LIST0)(f, t, peek, __VA_ARGS__)
// PYBIND11_MAP2_LIST(f, t, a1, a2, ...) expands to f(t, a1, a2), f(t, a3, a4), ...
# define PYBIND11_MAP2_LIST(f, t, ...) \
PYBIND11_EVAL(PYBIND11_MAP2_LIST1(f, t, __VA_ARGS__, (), 0))
# define PYBIND11_NUMPY_DTYPE_EX(Type, ...) \
::pybind11::detail::npy_format_descriptor<Type>::register_dtype( \
::std::vector<::pybind11::detail::field_descriptor>{ \
PYBIND11_MAP2_LIST(PYBIND11_FIELD_DESCRIPTOR_EX, Type, __VA_ARGS__)})
#endif // __CLION_IDE__
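// Usage sketch for the macros above (hypothetical struct and field names):
//
//     struct Record { float x; int y; };
//     // Call once, e.g. inside PYBIND11_MODULE, before the type is used as a dtype:
//     PYBIND11_NUMPY_DTYPE(Record, x, y);
//
// Afterwards `pybind11::dtype::of<Record>()` is valid and `pybind11::array_t<Record>`
// can be used for structured arrays. PYBIND11_NUMPY_DTYPE_EX additionally takes a
// custom NumPy field name after each member, e.g.
// PYBIND11_NUMPY_DTYPE_EX(Record, x, "x_pos", y, "y_pos").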
class common_iterator {
public:
using container_type = std::vector<ssize_t>;
using value_type = container_type::value_type;
using size_type = container_type::size_type;
common_iterator() : m_strides() {}
common_iterator(void *ptr, const container_type &strides, const container_type &shape)
: p_ptr(reinterpret_cast<char *>(ptr)), m_strides(strides.size()) {
m_strides.back() = static_cast<value_type>(strides.back());
for (size_type i = m_strides.size() - 1; i != 0; --i) {
size_type j = i - 1;
auto s = static_cast<value_type>(shape[i]);
m_strides[j] = strides[j] + m_strides[i] - strides[i] * s;
}
}
void increment(size_type dim) { p_ptr += m_strides[dim]; }
void *data() const { return p_ptr; }
private:
char *p_ptr{nullptr};
container_type m_strides;
};
template <size_t N>
class multi_array_iterator {
public:
using container_type = std::vector<ssize_t>;
multi_array_iterator(const std::array<buffer_info, N> &buffers, const container_type &shape)
: m_shape(shape.size()), m_index(shape.size(), 0), m_common_iterator() {
// Manual copy to avoid conversion warning if using std::copy
for (size_t i = 0; i < shape.size(); ++i) {
m_shape[i] = shape[i];
}
container_type strides(shape.size());
for (size_t i = 0; i < N; ++i) {
init_common_iterator(buffers[i], shape, m_common_iterator[i], strides);
}
}
multi_array_iterator &operator++() {
for (size_t j = m_index.size(); j != 0; --j) {
size_t i = j - 1;
if (++m_index[i] != m_shape[i]) {
increment_common_iterator(i);
break;
}
m_index[i] = 0;
}
return *this;
}
template <size_t K, class T = void>
T *data() const {
return reinterpret_cast<T *>(m_common_iterator[K].data());
}
private:
using common_iter = common_iterator;
void init_common_iterator(const buffer_info &buffer,
const container_type &shape,
common_iter &iterator,
container_type &strides) {
auto buffer_shape_iter = buffer.shape.rbegin();
auto buffer_strides_iter = buffer.strides.rbegin();
auto shape_iter = shape.rbegin();
auto strides_iter = strides.rbegin();
while (buffer_shape_iter != buffer.shape.rend()) {
if (*shape_iter == *buffer_shape_iter) {
*strides_iter = *buffer_strides_iter;
} else {
*strides_iter = 0;
}
++buffer_shape_iter;
++buffer_strides_iter;
++shape_iter;
++strides_iter;
}
std::fill(strides_iter, strides.rend(), 0);
iterator = common_iter(buffer.ptr, strides, shape);
}
void increment_common_iterator(size_t dim) {
for (auto &iter : m_common_iterator) {
iter.increment(dim);
}
}
container_type m_shape;
container_type m_index;
std::array<common_iter, N> m_common_iterator;
};
enum class broadcast_trivial { non_trivial, c_trivial, f_trivial };
// Populates the shape and number of dimensions for the set of buffers. Returns a
// broadcast_trivial enum value indicating whether the broadcast is "trivial"--that is, whether
// each buffer is either a singleton or a full-size, C-contiguous (`c_trivial`) or
// Fortran-contiguous (`f_trivial`) storage buffer; returns `non_trivial` otherwise.
template <size_t N>
broadcast_trivial
broadcast(const std::array<buffer_info, N> &buffers, ssize_t &ndim, std::vector<ssize_t> &shape) {
ndim = std::accumulate(
buffers.begin(), buffers.end(), ssize_t(0), [](ssize_t res, const buffer_info &buf) {
return std::max(res, buf.ndim);
});
shape.clear();
shape.resize((size_t) ndim, 1);
// Figure out the output size, and make sure all input arrays conform (i.e. are either size 1
// or the full size).
for (size_t i = 0; i < N; ++i) {
auto res_iter = shape.rbegin();
auto end = buffers[i].shape.rend();
for (auto shape_iter = buffers[i].shape.rbegin(); shape_iter != end;
++shape_iter, ++res_iter) {
const auto &dim_size_in = *shape_iter;
auto &dim_size_out = *res_iter;
// Each input dimension can either be 1 or `n`, but `n` values must match across
// buffers
if (dim_size_out == 1) {
dim_size_out = dim_size_in;
} else if (dim_size_in != 1 && dim_size_in != dim_size_out) {
pybind11_fail("pybind11::vectorize: incompatible size/dimension of inputs!");
}
}
}
bool trivial_broadcast_c = true;
bool trivial_broadcast_f = true;
for (size_t i = 0; i < N && (trivial_broadcast_c || trivial_broadcast_f); ++i) {
if (buffers[i].size == 1) {
continue;
}
// Require the same number of dimensions:
if (buffers[i].ndim != ndim) {
return broadcast_trivial::non_trivial;
}
// Require all dimensions be full-size:
if (!std::equal(buffers[i].shape.cbegin(), buffers[i].shape.cend(), shape.cbegin())) {
return broadcast_trivial::non_trivial;
}
// Check for C contiguity (but only if previous inputs were also C contiguous)
if (trivial_broadcast_c) {
ssize_t expect_stride = buffers[i].itemsize;
auto end = buffers[i].shape.crend();
for (auto shape_iter = buffers[i].shape.crbegin(),
stride_iter = buffers[i].strides.crbegin();
trivial_broadcast_c && shape_iter != end;
++shape_iter, ++stride_iter) {
if (expect_stride == *stride_iter) {
expect_stride *= *shape_iter;
} else {
trivial_broadcast_c = false;
}
}
}
// Check for Fortran contiguity (if previous inputs were also F contiguous)
if (trivial_broadcast_f) {
ssize_t expect_stride = buffers[i].itemsize;
auto end = buffers[i].shape.cend();
for (auto shape_iter = buffers[i].shape.cbegin(),
stride_iter = buffers[i].strides.cbegin();
trivial_broadcast_f && shape_iter != end;
++shape_iter, ++stride_iter) {
if (expect_stride == *stride_iter) {
expect_stride *= *shape_iter;
} else {
trivial_broadcast_f = false;
}
}
}
}
return trivial_broadcast_c ? broadcast_trivial::c_trivial
: trivial_broadcast_f ? broadcast_trivial::f_trivial
: broadcast_trivial::non_trivial;
}
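// Worked example (illustrative): broadcasting buffers of shapes (3, 1, 4) and (2, 4)
// aligns trailing dimensions, giving ndim = 3 and shape (3, 2, 4); each input dimension
// must be either 1 or equal to the corresponding output dimension. The result is only
// c_trivial/f_trivial when every non-singleton buffer already has the full output shape
// with contiguous strides, which lets the caller bypass the strided iterator path.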
template <typename T>
struct vectorize_arg {
static_assert(!std::is_rvalue_reference<T>::value,
"Functions with rvalue reference arguments cannot be vectorized");
// The wrapped function gets called with this type:
using call_type = remove_reference_t<T>;
// Is this a vectorized argument?
static constexpr bool vectorize
= satisfies_any_of<call_type, std::is_arithmetic, is_complex, is_pod>::value
&& satisfies_none_of<call_type,
std::is_pointer,
std::is_array,
is_std_array,
std::is_enum>::value
&& (!std::is_reference<T>::value
|| (std::is_lvalue_reference<T>::value && std::is_const<call_type>::value));
// Accept this type: an array for vectorized types, otherwise the type as-is:
using type = conditional_t<vectorize, array_t<remove_cv_t<call_type>, array::forcecast>, T>;
};
// py::vectorize when a return type is present
template <typename Func, typename Return, typename... Args>
struct vectorize_returned_array {
using Type = array_t<Return>;
static Type create(broadcast_trivial trivial, const std::vector<ssize_t> &shape) {
if (trivial == broadcast_trivial::f_trivial) {
return array_t<Return, array::f_style>(shape);
}
return array_t<Return>(shape);
}
static Return *mutable_data(Type &array) { return array.mutable_data(); }
static Return call(Func &f, Args &...args) { return f(args...); }
static void call(Return *out, size_t i, Func &f, Args &...args) { out[i] = f(args...); }
};
// py::vectorize when a return type is not present
template <typename Func, typename... Args>
struct vectorize_returned_array<Func, void, Args...> {
using Type = none;
static Type create(broadcast_trivial, const std::vector<ssize_t> &) { return none(); }
static void *mutable_data(Type &) { return nullptr; }
static detail::void_type call(Func &f, Args &...args) {
f(args...);
return {};
}
static void call(void *, size_t, Func &f, Args &...args) { f(args...); }
};
template <typename Func, typename Return, typename... Args>
struct vectorize_helper {
// NVCC for some reason breaks if NVectorized is private
#ifdef __CUDACC__
public:
#else
private:
#endif
static constexpr size_t N = sizeof...(Args);
static constexpr size_t NVectorized = constexpr_sum(vectorize_arg<Args>::vectorize...);
static_assert(
NVectorized >= 1,
"pybind11::vectorize(...) requires a function with at least one vectorizable argument");
public:
template <typename T,
// SFINAE to prevent shadowing the copy constructor.
typename = detail::enable_if_t<
!std::is_same<vectorize_helper, typename std::decay<T>::type>::value>>
explicit vectorize_helper(T &&f) : f(std::forward<T>(f)) {}
object operator()(typename vectorize_arg<Args>::type... args) {
return run(args...,
make_index_sequence<N>(),
select_indices<vectorize_arg<Args>::vectorize...>(),
make_index_sequence<NVectorized>());
}
private:
remove_reference_t<Func> f;
// Internal compiler error in MSVC 19.16.27025.1 (Visual Studio 2017 15.9.4), when compiling
// with "/permissive-" flag when arg_call_types is manually inlined.
using arg_call_types = std::tuple<typename vectorize_arg<Args>::call_type...>;
template <size_t Index>
using param_n_t = typename std::tuple_element<Index, arg_call_types>::type;
using returned_array = vectorize_returned_array<Func, Return, Args...>;
// Runs a vectorized function given arguments tuple and three index sequences:
// - Index is the full set of 0 ... (N-1) argument indices;
// - VIndex is the subset of argument indices with vectorized parameters, letting us access
// vectorized arguments (anything not in this sequence is passed through)
    // - BIndex is an incremental sequence (beginning at 0) of the same size as VIndex, so that
// we can store vectorized buffer_infos in an array (argument VIndex has its buffer at
// index BIndex in the array).
template <size_t... Index, size_t... VIndex, size_t... BIndex>
object run(typename vectorize_arg<Args>::type &...args,
index_sequence<Index...> i_seq,
index_sequence<VIndex...> vi_seq,
index_sequence<BIndex...> bi_seq) {
// Pointers to values the function was called with; the vectorized ones set here will start
        // out as array_t<T> pointers, but they will be changed to T pointers before we
        // call the wrapped function. Non-vectorized pointers are left as-is.
std::array<void *, N> params{{reinterpret_cast<void *>(&args)...}};
// The array of `buffer_info`s of vectorized arguments:
std::array<buffer_info, NVectorized> buffers{
{reinterpret_cast<array *>(params[VIndex])->request()...}};
/* Determine dimensions parameters of output array */
ssize_t nd = 0;
std::vector<ssize_t> shape(0);
auto trivial = broadcast(buffers, nd, shape);
auto ndim = (size_t) nd;
size_t size
= std::accumulate(shape.begin(), shape.end(), (size_t) 1, std::multiplies<size_t>());
// If all arguments are 0-dimension arrays (i.e. single values) return a plain value (i.e.
// not wrapped in an array).
if (size == 1 && ndim == 0) {
PYBIND11_EXPAND_SIDE_EFFECTS(params[VIndex] = buffers[BIndex].ptr);
return cast(
returned_array::call(f, *reinterpret_cast<param_n_t<Index> *>(params[Index])...));
}
auto result = returned_array::create(trivial, shape);
PYBIND11_WARNING_PUSH
#ifdef PYBIND11_DETECTED_CLANG_WITH_MISLEADING_CALL_STD_MOVE_EXPLICITLY_WARNING
PYBIND11_WARNING_DISABLE_CLANG("-Wreturn-std-move")
#endif
if (size == 0) {
return result;
}
/* Call the function */
auto *mutable_data = returned_array::mutable_data(result);
if (trivial == broadcast_trivial::non_trivial) {
apply_broadcast(buffers, params, mutable_data, size, shape, i_seq, vi_seq, bi_seq);
} else {
apply_trivial(buffers, params, mutable_data, size, i_seq, vi_seq, bi_seq);
}
return result;
PYBIND11_WARNING_POP
}
template <size_t... Index, size_t... VIndex, size_t... BIndex>
void apply_trivial(std::array<buffer_info, NVectorized> &buffers,
std::array<void *, N> ¶ms,
Return *out,
size_t size,
index_sequence<Index...>,
index_sequence<VIndex...>,
index_sequence<BIndex...>) {
// Initialize an array of mutable byte references and sizes with references set to the
// appropriate pointer in `params`; as we iterate, we'll increment each pointer by its size
// (except for singletons, which get an increment of 0).
std::array<std::pair<unsigned char *&, const size_t>, NVectorized> vecparams{
{std::pair<unsigned char *&, const size_t>(
reinterpret_cast<unsigned char *&>(params[VIndex] = buffers[BIndex].ptr),
buffers[BIndex].size == 1 ? 0 : sizeof(param_n_t<VIndex>))...}};
for (size_t i = 0; i < size; ++i) {
returned_array::call(
out, i, f, *reinterpret_cast<param_n_t<Index> *>(params[Index])...);
for (auto &x : vecparams) {
x.first += x.second;
}
}
}
template <size_t... Index, size_t... VIndex, size_t... BIndex>
void apply_broadcast(std::array<buffer_info, NVectorized> &buffers,
std::array<void *, N> &params,
Return *out,
size_t size,
const std::vector<ssize_t> &output_shape,
index_sequence<Index...>,
index_sequence<VIndex...>,
index_sequence<BIndex...>) {
multi_array_iterator<NVectorized> input_iter(buffers, output_shape);
for (size_t i = 0; i < size; ++i, ++input_iter) {
PYBIND11_EXPAND_SIDE_EFFECTS((params[VIndex] = input_iter.template data<BIndex>()));
returned_array::call(
out, i, f, *reinterpret_cast<param_n_t<Index> *>(std::get<Index>(params))...);
}
}
};
template <typename Func, typename Return, typename... Args>
vectorize_helper<Func, Return, Args...> vectorize_extractor(const Func &f, Return (*)(Args...)) {
return detail::vectorize_helper<Func, Return, Args...>(f);
}
template <typename T, int Flags>
struct handle_type_name<array_t<T, Flags>> {
static constexpr auto name
= const_name("numpy.ndarray[") + npy_format_descriptor<T>::name + const_name("]");
};
PYBIND11_NAMESPACE_END(detail)
// Vanilla pointer vectorizer:
template <typename Return, typename... Args>
detail::vectorize_helper<Return (*)(Args...), Return, Args...> vectorize(Return (*f)(Args...)) {
return detail::vectorize_helper<Return (*)(Args...), Return, Args...>(f);
}
// lambda vectorizer:
template <typename Func, detail::enable_if_t<detail::is_lambda<Func>::value, int> = 0>
auto vectorize(Func &&f)
-> decltype(detail::vectorize_extractor(std::forward<Func>(f),
(detail::function_signature_t<Func> *) nullptr)) {
return detail::vectorize_extractor(std::forward<Func>(f),
(detail::function_signature_t<Func> *) nullptr);
}
// Vectorize a class method (non-const):
template <typename Return,
typename Class,
typename... Args,
typename Helper = detail::vectorize_helper<
decltype(std::mem_fn(std::declval<Return (Class::*)(Args...)>())),
Return,
Class *,
Args...>>
Helper vectorize(Return (Class::*f)(Args...)) {
return Helper(std::mem_fn(f));
}
// Vectorize a class method (const):
template <typename Return,
typename Class,
typename... Args,
typename Helper = detail::vectorize_helper<
decltype(std::mem_fn(std::declval<Return (Class::*)(Args...) const>())),
Return,
const Class *,
Args...>>
Helper vectorize(Return (Class::*f)(Args...) const) {
return Helper(std::mem_fn(f));
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
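A minimal usage sketch for the vectorizers defined above (the module and function names are illustrative, not part of the header): `py::vectorize` wraps a scalar function so the resulting binding accepts either scalars or broadcastable NumPy arrays and returns an array of results.
```cpp
#include <pybind11/numpy.h>
namespace py = pybind11;

// Plain function pointer: handled by the "vanilla pointer vectorizer".
double scaled_sum(double x, double y) { return 2.0 * x + y; }

PYBIND11_MODULE(example, m) {
    m.def("scaled_sum", py::vectorize(scaled_sum));
    // Lambda: the signature is deduced via function_signature_t.
    m.def("square", py::vectorize([](double v) { return v * v; }));
}
```
Calling `example.scaled_sum(np.arange(3.0), 1.0)` broadcasts the scalar against the array; per the 0-dimensional fast path in `run()`, calling it with two plain floats returns a plain float rather than a 0-d array.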
|
==========================================================================================================================
SOURCE CODE FILE: operators.h
LINES: 1
SIZE: 9.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\operators.h
ENCODING: utf-8
```h
/*
pybind11/operators.h: Metatemplates for operator overloading
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// Enumeration with all supported operator types
enum op_id : int {
op_add,
op_sub,
op_mul,
op_div,
op_mod,
op_divmod,
op_pow,
op_lshift,
op_rshift,
op_and,
op_xor,
op_or,
op_neg,
op_pos,
op_abs,
op_invert,
op_int,
op_long,
op_float,
op_str,
op_cmp,
op_gt,
op_ge,
op_lt,
op_le,
op_eq,
op_ne,
op_iadd,
op_isub,
op_imul,
op_idiv,
op_imod,
op_ilshift,
op_irshift,
op_iand,
op_ixor,
op_ior,
op_complex,
op_bool,
op_nonzero,
op_repr,
op_truediv,
op_itruediv,
op_hash
};
enum op_type : int {
op_l, /* base type on left */
op_r, /* base type on right */
op_u /* unary operator */
};
struct self_t {};
static const self_t self = self_t();
/// Type for an unused type slot
struct undefined_t {};
/// Don't warn about an unused variable
inline self_t __self() { return self; }
/// base template of operator implementations
template <op_id, op_type, typename B, typename L, typename R>
struct op_impl {};
/// Operator implementation generator
template <op_id id, op_type ot, typename L, typename R>
struct op_ {
static constexpr bool op_enable_if_hook = true;
template <typename Class, typename... Extra>
void execute(Class &cl, const Extra &...extra) const {
using Base = typename Class::type;
using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;
using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;
using op = op_impl<id, ot, Base, L_type, R_type>;
cl.def(op::name(), &op::execute, is_operator(), extra...);
}
template <typename Class, typename... Extra>
void execute_cast(Class &cl, const Extra &...extra) const {
using Base = typename Class::type;
using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;
using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;
using op = op_impl<id, ot, Base, L_type, R_type>;
cl.def(op::name(), &op::execute_cast, is_operator(), extra...);
}
};
#define PYBIND11_BINARY_OPERATOR(id, rid, op, expr) \
template <typename B, typename L, typename R> \
struct op_impl<op_##id, op_l, B, L, R> { \
static char const *name() { return "__" #id "__"; } \
static auto execute(const L &l, const R &r) -> decltype(expr) { return (expr); } \
static B execute_cast(const L &l, const R &r) { return B(expr); } \
}; \
template <typename B, typename L, typename R> \
struct op_impl<op_##id, op_r, B, L, R> { \
static char const *name() { return "__" #rid "__"; } \
static auto execute(const R &r, const L &l) -> decltype(expr) { return (expr); } \
static B execute_cast(const R &r, const L &l) { return B(expr); } \
}; \
inline op_<op_##id, op_l, self_t, self_t> op(const self_t &, const self_t &) { \
return op_<op_##id, op_l, self_t, self_t>(); \
} \
template <typename T> \
op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) { \
return op_<op_##id, op_l, self_t, T>(); \
} \
template <typename T> \
op_<op_##id, op_r, T, self_t> op(const T &, const self_t &) { \
return op_<op_##id, op_r, T, self_t>(); \
}
#define PYBIND11_INPLACE_OPERATOR(id, op, expr) \
template <typename B, typename L, typename R> \
struct op_impl<op_##id, op_l, B, L, R> { \
static char const *name() { return "__" #id "__"; } \
static auto execute(L &l, const R &r) -> decltype(expr) { return expr; } \
static B execute_cast(L &l, const R &r) { return B(expr); } \
}; \
template <typename T> \
op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) { \
return op_<op_##id, op_l, self_t, T>(); \
}
#define PYBIND11_UNARY_OPERATOR(id, op, expr) \
template <typename B, typename L> \
struct op_impl<op_##id, op_u, B, L, undefined_t> { \
static char const *name() { return "__" #id "__"; } \
static auto execute(const L &l) -> decltype(expr) { return expr; } \
static B execute_cast(const L &l) { return B(expr); } \
}; \
inline op_<op_##id, op_u, self_t, undefined_t> op(const self_t &) { \
return op_<op_##id, op_u, self_t, undefined_t>(); \
}
PYBIND11_BINARY_OPERATOR(sub, rsub, operator-, l - r)
PYBIND11_BINARY_OPERATOR(add, radd, operator+, l + r)
PYBIND11_BINARY_OPERATOR(mul, rmul, operator*, l *r)
PYBIND11_BINARY_OPERATOR(truediv, rtruediv, operator/, l / r)
PYBIND11_BINARY_OPERATOR(mod, rmod, operator%, l % r)
PYBIND11_BINARY_OPERATOR(lshift, rlshift, operator<<, l << r)
PYBIND11_BINARY_OPERATOR(rshift, rrshift, operator>>, l >> r)
PYBIND11_BINARY_OPERATOR(and, rand, operator&, l &r)
PYBIND11_BINARY_OPERATOR(xor, rxor, operator^, l ^ r)
PYBIND11_BINARY_OPERATOR(eq, eq, operator==, l == r)
PYBIND11_BINARY_OPERATOR(ne, ne, operator!=, l != r)
PYBIND11_BINARY_OPERATOR(or, ror, operator|, l | r)
PYBIND11_BINARY_OPERATOR(gt, lt, operator>, l > r)
PYBIND11_BINARY_OPERATOR(ge, le, operator>=, l >= r)
PYBIND11_BINARY_OPERATOR(lt, gt, operator<, l < r)
PYBIND11_BINARY_OPERATOR(le, ge, operator<=, l <= r)
// PYBIND11_BINARY_OPERATOR(pow, rpow, pow, std::pow(l, r))
PYBIND11_INPLACE_OPERATOR(iadd, operator+=, l += r)
PYBIND11_INPLACE_OPERATOR(isub, operator-=, l -= r)
PYBIND11_INPLACE_OPERATOR(imul, operator*=, l *= r)
PYBIND11_INPLACE_OPERATOR(itruediv, operator/=, l /= r)
PYBIND11_INPLACE_OPERATOR(imod, operator%=, l %= r)
PYBIND11_INPLACE_OPERATOR(ilshift, operator<<=, l <<= r)
PYBIND11_INPLACE_OPERATOR(irshift, operator>>=, l >>= r)
PYBIND11_INPLACE_OPERATOR(iand, operator&=, l &= r)
PYBIND11_INPLACE_OPERATOR(ixor, operator^=, l ^= r)
PYBIND11_INPLACE_OPERATOR(ior, operator|=, l |= r)
PYBIND11_UNARY_OPERATOR(neg, operator-, -l)
PYBIND11_UNARY_OPERATOR(pos, operator+, +l)
// WARNING: This usage of `abs` should only be done for existing STL overloads.
// Adding overloads directly into the `std::` namespace is advised against:
// https://en.cppreference.com/w/cpp/language/extending_std
PYBIND11_UNARY_OPERATOR(abs, abs, std::abs(l))
PYBIND11_UNARY_OPERATOR(hash, hash, std::hash<L>()(l))
PYBIND11_UNARY_OPERATOR(invert, operator~, (~l))
PYBIND11_UNARY_OPERATOR(bool, operator!, !!l)
PYBIND11_UNARY_OPERATOR(int, int_, (int) l)
PYBIND11_UNARY_OPERATOR(float, float_, (double) l)
#undef PYBIND11_BINARY_OPERATOR
#undef PYBIND11_INPLACE_OPERATOR
#undef PYBIND11_UNARY_OPERATOR
PYBIND11_NAMESPACE_END(detail)
using detail::self;
// Add named operators so that they are accessible via `py::`.
using detail::hash;
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
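A minimal sketch of how these metatemplates are used through `py::self` (the `Vec2` type is illustrative): each `.def(py::self <op> py::self)` expands to an `op_impl` specialization that registers the matching Python dunder method.
```cpp
#include <pybind11/operators.h>
namespace py = pybind11;

struct Vec2 {
    double x, y;
    Vec2(double x_, double y_) : x(x_), y(y_) {}
    Vec2 operator+(const Vec2 &o) const { return Vec2(x + o.x, y + o.y); }
    Vec2 &operator+=(const Vec2 &o) { x += o.x; y += o.y; return *this; }
    Vec2 operator-() const { return Vec2(-x, -y); }
    bool operator==(const Vec2 &o) const { return x == o.x && y == o.y; }
};

PYBIND11_MODULE(example, m) {
    py::class_<Vec2>(m, "Vec2")
        .def(py::init<double, double>())
        .def(py::self + py::self)   // binds __add__
        .def(py::self += py::self)  // binds __iadd__
        .def(py::self == py::self)  // binds __eq__
        .def(-py::self);            // binds __neg__ (unary)
}
```
Mixed-type forms such as `py::self * double()` (binding `__mul__`) and `double() * py::self` (binding `__rmul__`) select the `op_l` and `op_r` specializations respectively, provided a matching C++ `operator*` exists.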
|
========================================================================================================================
SOURCE CODE FILE: options.h
LINES: 1
SIZE: 2.76 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\options.h
ENCODING: utf-8
```h
/*
pybind11/options.h: global settings that are configurable at runtime.
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
class options {
public:
// Default RAII constructor, which leaves settings as they currently are.
options() : previous_state(global_state()) {}
// Class is non-copyable.
options(const options &) = delete;
options &operator=(const options &) = delete;
// Destructor, which restores settings that were in effect before.
~options() { global_state() = previous_state; }
// Setter methods (affect the global state):
options &disable_user_defined_docstrings() & {
global_state().show_user_defined_docstrings = false;
return *this;
}
options &enable_user_defined_docstrings() & {
global_state().show_user_defined_docstrings = true;
return *this;
}
options &disable_function_signatures() & {
global_state().show_function_signatures = false;
return *this;
}
options &enable_function_signatures() & {
global_state().show_function_signatures = true;
return *this;
}
options &disable_enum_members_docstring() & {
global_state().show_enum_members_docstring = false;
return *this;
}
options &enable_enum_members_docstring() & {
global_state().show_enum_members_docstring = true;
return *this;
}
// Getter methods (return the global state):
static bool show_user_defined_docstrings() {
return global_state().show_user_defined_docstrings;
}
static bool show_function_signatures() { return global_state().show_function_signatures; }
static bool show_enum_members_docstring() {
return global_state().show_enum_members_docstring;
}
// This type is not meant to be allocated on the heap.
void *operator new(size_t) = delete;
private:
struct state {
bool show_user_defined_docstrings = true; //< Include user-supplied texts in docstrings.
bool show_function_signatures = true; //< Include auto-generated function signatures
// in docstrings.
bool show_enum_members_docstring = true; //< Include auto-generated member list in enum
// docstrings.
};
static state &global_state() {
static state instance;
return instance;
}
state previous_state;
};
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
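A minimal sketch of the intended RAII usage (the binding code is illustrative): any settings changed through an `options` instance are reverted by its destructor, so the effect is scoped.
```cpp
#include <pybind11/pybind11.h>
namespace py = pybind11;

void bind_helpers(py::module_ &m) {
    py::options local;
    local.disable_function_signatures();
    // Functions bound while `local` is alive get docstrings without the
    // auto-generated signature line.
    m.def("helper", [](int i) { return i; }, "Implementation detail.");
}   // ~options() restores the previous global state here
```
Note that the setters mutate process-wide state in `global_state()`; the instance itself only remembers what to restore.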
|
=========================================================================================================================
SOURCE CODE FILE: pybind11.h
LINES: 32
SIZE: 129.76 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\pybind11.h
ENCODING: utf-8
```h
/*
pybind11/pybind11.h: Main header file of the C++11 python
binding generator library
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/class.h"
#include "detail/exception_translation.h"
#include "detail/init.h"
#include "attr.h"
#include "gil.h"
#include "gil_safe_call_once.h"
#include "options.h"
#include "typing.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <vector>
#if defined(__cpp_lib_launder) && !(defined(_MSC_VER) && (_MSC_VER < 1914))
# define PYBIND11_STD_LAUNDER std::launder
# define PYBIND11_HAS_STD_LAUNDER 1
#else
# define PYBIND11_STD_LAUNDER
# define PYBIND11_HAS_STD_LAUNDER 0
#endif
#if defined(__GNUG__) && !defined(__clang__)
# include <cxxabi.h>
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
/* https://stackoverflow.com/questions/46798456/handling-gccs-noexcept-type-warning
This warning is about ABI compatibility, not code health.
It is only actually needed in a couple places, but apparently GCC 7 "generates this warning if
and only if the first template instantiation ... involves noexcept" [stackoverflow], therefore
it could get triggered from seemingly random places, depending on user code.
No other GCC version generates this warning.
*/
#if defined(__GNUC__) && __GNUC__ == 7
PYBIND11_WARNING_DISABLE_GCC("-Wnoexcept-type")
#endif
PYBIND11_WARNING_DISABLE_MSVC(4127)
PYBIND11_NAMESPACE_BEGIN(detail)
inline std::string replace_newlines_and_squash(const char *text) {
const char *whitespaces = " \t\n\r\f\v";
std::string result(text);
bool previous_is_whitespace = false;
if (result.size() >= 2) {
// Do not modify string representations
char first_char = result[0];
char last_char = result[result.size() - 1];
if (first_char == last_char && first_char == '\'') {
return result;
}
}
result.clear();
// Replace characters in whitespaces array with spaces and squash consecutive spaces
while (*text != '\0') {
if (std::strchr(whitespaces, *text)) {
if (!previous_is_whitespace) {
result += ' ';
previous_is_whitespace = true;
}
} else {
result += *text;
previous_is_whitespace = false;
}
++text;
}
// Strip leading and trailing whitespaces
const size_t str_begin = result.find_first_not_of(whitespaces);
if (str_begin == std::string::npos) {
return "";
}
const size_t str_end = result.find_last_not_of(whitespaces);
const size_t str_range = str_end - str_begin + 1;
return result.substr(str_begin, str_range);
}
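// Usage sketch (illustrative inputs): given the rules above,
//   replace_newlines_and_squash("one\n\n\ttwo  ")  yields "one two", and
//   replace_newlines_and_squash("'a\nb'")          is returned unchanged,
// since a string that starts and ends with a single quote is treated as a
// string representation and passed through.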
#if defined(_MSC_VER)
# define PYBIND11_COMPAT_STRDUP _strdup
#else
# define PYBIND11_COMPAT_STRDUP strdup
#endif
PYBIND11_NAMESPACE_END(detail)
/// Wraps an arbitrary C++ function/method/lambda function/.. into a callable Python object
class cpp_function : public function {
public:
cpp_function() = default;
// NOLINTNEXTLINE(google-explicit-constructor)
cpp_function(std::nullptr_t) {}
cpp_function(std::nullptr_t, const is_setter &) {}
/// Construct a cpp_function from a vanilla function pointer
template <typename Return, typename... Args, typename... Extra>
// NOLINTNEXTLINE(google-explicit-constructor)
cpp_function(Return (*f)(Args...), const Extra &...extra) {
initialize(f, f, extra...);
}
/// Construct a cpp_function from a lambda function (possibly with internal state)
template <typename Func,
typename... Extra,
typename = detail::enable_if_t<detail::is_lambda<Func>::value>>
// NOLINTNEXTLINE(google-explicit-constructor)
cpp_function(Func &&f, const Extra &...extra) {
initialize(
std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr, extra...);
}
/// Construct a cpp_function from a class method (non-const, no ref-qualifier)
template <typename Return, typename Class, typename... Arg, typename... Extra>
// NOLINTNEXTLINE(google-explicit-constructor)
cpp_function(Return (Class::*f)(Arg...), const Extra &...extra) {
initialize(
[f](Class *c, Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
(Return(*)(Class *, Arg...)) nullptr,
extra...);
}
/// Construct a cpp_function from a class method (non-const, lvalue ref-qualifier)
/// A copy of the overload for non-const functions without explicit ref-qualifier
/// but with an added `&`.
template <typename Return, typename Class, typename... Arg, typename... Extra>
// NOLINTNEXTLINE(google-explicit-constructor)
cpp_function(Return (Class::*f)(Arg...) &, const Extra &...extra) {
initialize(
[f](Class *c, Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
(Return(*)(Class *, Arg...)) nullptr,
extra...);
}
/// Construct a cpp_function from a class method (const, no ref-qualifier)
template <typename Return, typename Class, typename... Arg, typename... Extra>
// NOLINTNEXTLINE(google-explicit-constructor)
cpp_function(Return (Class::*f)(Arg...) const, const Extra &...extra) {
initialize([f](const Class *c,
Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
(Return(*)(const Class *, Arg...)) nullptr,
extra...);
}
/// Construct a cpp_function from a class method (const, lvalue ref-qualifier)
/// A copy of the overload for const functions without explicit ref-qualifier
/// but with an added `&`.
template <typename Return, typename Class, typename... Arg, typename... Extra>
// NOLINTNEXTLINE(google-explicit-constructor)
cpp_function(Return (Class::*f)(Arg...) const &, const Extra &...extra) {
initialize([f](const Class *c,
Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
(Return(*)(const Class *, Arg...)) nullptr,
extra...);
}
/// Return the function name
object name() const { return attr("__name__"); }
protected:
struct InitializingFunctionRecordDeleter {
// `destruct(function_record, false)`: `initialize_generic` copies strings and
// takes care of cleaning up in case of exceptions. So pass `false` to `free_strings`.
void operator()(detail::function_record *rec) { destruct(rec, false); }
};
using unique_function_record
= std::unique_ptr<detail::function_record, InitializingFunctionRecordDeleter>;
/// Space optimization: don't inline this frequently instantiated fragment
PYBIND11_NOINLINE unique_function_record make_function_record() {
return unique_function_record(new detail::function_record());
}
/// Special internal constructor for functors, lambda functions, etc.
template <typename Func, typename Return, typename... Args, typename... Extra>
void initialize(Func &&f, Return (*)(Args...), const Extra &...extra) {
using namespace detail;
struct capture {
remove_reference_t<Func> f;
};
/* Store the function including any extra state it might have (e.g. a lambda capture
* object) */
// The unique_ptr makes sure nothing is leaked in case of an exception.
auto unique_rec = make_function_record();
auto *rec = unique_rec.get();
/* Store the capture object directly in the function record if there is enough space */
if (sizeof(capture) <= sizeof(rec->data)) {
/* Without these pragmas, GCC warns that there might not be
enough space to use the placement new operator. However, the
'if' statement above ensures that this is the case. */
PYBIND11_WARNING_PUSH
#if defined(__GNUG__) && __GNUC__ >= 6
PYBIND11_WARNING_DISABLE_GCC("-Wplacement-new")
#endif
new ((capture *) &rec->data) capture{std::forward<Func>(f)};
#if !PYBIND11_HAS_STD_LAUNDER
PYBIND11_WARNING_DISABLE_GCC("-Wstrict-aliasing")
#endif
// UB without std::launder, but without breaking ABI and/or
// a significant refactoring it's "impossible" to solve.
if (!std::is_trivially_destructible<capture>::value) {
rec->free_data = [](function_record *r) {
auto data = PYBIND11_STD_LAUNDER((capture *) &r->data);
(void) data;
data->~capture();
};
}
PYBIND11_WARNING_POP
} else {
rec->data[0] = new capture{std::forward<Func>(f)};
rec->free_data = [](function_record *r) { delete ((capture *) r->data[0]); };
}
/* Type casters for the function arguments and return value */
using cast_in = argument_loader<Args...>;
using cast_out
= make_caster<conditional_t<std::is_void<Return>::value, void_type, Return>>;
static_assert(
expected_num_args<Extra...>(
sizeof...(Args), cast_in::args_pos >= 0, cast_in::has_kwargs),
"The number of argument annotations does not match the number of function arguments");
/* Dispatch code which converts function arguments and performs the actual function call */
rec->impl = [](function_call &call) -> handle {
cast_in args_converter;
/* Try to cast the function arguments into the C++ domain */
if (!args_converter.load_args(call)) {
return PYBIND11_TRY_NEXT_OVERLOAD;
}
/* Invoke call policy pre-call hook */
process_attributes<Extra...>::precall(call);
/* Get a pointer to the capture object */
const auto *data = (sizeof(capture) <= sizeof(call.func.data) ? &call.func.data
: call.func.data[0]);
auto *cap = const_cast<capture *>(reinterpret_cast<const capture *>(data));
/* Override policy for rvalues -- usually to enforce rvp::move on an rvalue */
return_value_policy policy
= return_value_policy_override<Return>::policy(call.func.policy);
/* Function scope guard -- defaults to the compile-to-nothing `void_type` */
using Guard = extract_guard_t<Extra...>;
/* Perform the function call */
handle result;
if (call.func.is_setter) {
(void) std::move(args_converter).template call<Return, Guard>(cap->f);
result = none().release();
} else {
result = cast_out::cast(
std::move(args_converter).template call<Return, Guard>(cap->f),
policy,
call.parent);
}
/* Invoke call policy post-call hook */
process_attributes<Extra...>::postcall(call, result);
return result;
};
rec->nargs_pos = cast_in::args_pos >= 0
? static_cast<std::uint16_t>(cast_in::args_pos)
: sizeof...(Args) - cast_in::has_kwargs; // Will get reduced more if
// we have a kw_only
rec->has_args = cast_in::args_pos >= 0;
rec->has_kwargs = cast_in::has_kwargs;
/* Process any user-provided function attributes */
process_attributes<Extra...>::init(extra..., rec);
{
constexpr bool has_kw_only_args = any_of<std::is_same<kw_only, Extra>...>::value,
has_pos_only_args = any_of<std::is_same<pos_only, Extra>...>::value,
has_arg_annotations = any_of<is_keyword<Extra>...>::value;
static_assert(has_arg_annotations || !has_kw_only_args,
"py::kw_only requires the use of argument annotations");
static_assert(has_arg_annotations || !has_pos_only_args,
"py::pos_only requires the use of argument annotations (for docstrings "
"and aligning the annotations to the argument)");
static_assert(constexpr_sum(is_kw_only<Extra>::value...) <= 1,
"py::kw_only may be specified only once");
static_assert(constexpr_sum(is_pos_only<Extra>::value...) <= 1,
"py::pos_only may be specified only once");
constexpr auto kw_only_pos = constexpr_first<is_kw_only, Extra...>();
constexpr auto pos_only_pos = constexpr_first<is_pos_only, Extra...>();
static_assert(!(has_kw_only_args && has_pos_only_args) || pos_only_pos < kw_only_pos,
"py::pos_only must come before py::kw_only");
}
/* Generate a readable signature describing the function's arguments and return
value types */
static constexpr auto signature
= const_name("(") + cast_in::arg_names + const_name(") -> ") + cast_out::name;
PYBIND11_DESCR_CONSTEXPR auto types = decltype(signature)::types();
/* Register the function with Python from generic (non-templated) code */
// Pass on the ownership over the `unique_rec` to `initialize_generic`. `rec` stays valid.
initialize_generic(std::move(unique_rec), signature.text, types.data(), sizeof...(Args));
/* Stash some additional information used by an important optimization in 'functional.h' */
using FunctionType = Return (*)(Args...);
constexpr bool is_function_ptr
= std::is_convertible<Func, FunctionType>::value && sizeof(capture) == sizeof(void *);
if (is_function_ptr) {
rec->is_stateless = true;
rec->data[1]
= const_cast<void *>(reinterpret_cast<const void *>(&typeid(FunctionType)));
}
}
// Utility class that keeps track of all duplicated strings, and cleans them up in its
// destructor, unless they are released. Basically a RAII-solution to deal with exceptions
// along the way.
class strdup_guard {
public:
strdup_guard() = default;
strdup_guard(const strdup_guard &) = delete;
strdup_guard &operator=(const strdup_guard &) = delete;
~strdup_guard() {
for (auto *s : strings) {
std::free(s);
}
}
char *operator()(const char *s) {
auto *t = PYBIND11_COMPAT_STRDUP(s);
strings.push_back(t);
return t;
}
void release() { strings.clear(); }
private:
std::vector<char *> strings;
};
/// Register a function call with Python (generic non-templated code goes here)
void initialize_generic(unique_function_record &&unique_rec,
const char *text,
const std::type_info *const *types,
size_t args) {
// Do NOT receive `unique_rec` by value. If this function fails to move out the unique_ptr,
// we do not want this to destruct the pointer. `initialize` (the caller) still relies on
// the pointee being alive after this call. Only move out if a `capsule` is going to keep
// it alive.
auto *rec = unique_rec.get();
// Keep track of strdup'ed strings, and clean them up as long as the function's capsule
// has not taken ownership yet (when `unique_rec.release()` is called).
// Note: This cannot easily be fixed by a `unique_ptr` with custom deleter, because the
// strings are only referenced before strdup'ing. So only *after* the following block could
// `destruct` safely be called, but even then, `repr` could still throw in the middle of
// copying all strings.
strdup_guard guarded_strdup;
/* Create copies of all referenced C-style strings */
rec->name = guarded_strdup(rec->name ? rec->name : "");
if (rec->doc) {
rec->doc = guarded_strdup(rec->doc);
}
for (auto &a : rec->args) {
if (a.name) {
a.name = guarded_strdup(a.name);
}
if (a.descr) {
a.descr = guarded_strdup(a.descr);
} else if (a.value) {
a.descr = guarded_strdup(repr(a.value).cast<std::string>().c_str());
}
}
rec->is_constructor = (std::strcmp(rec->name, "__init__") == 0)
|| (std::strcmp(rec->name, "__setstate__") == 0);
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES) && !defined(PYBIND11_DISABLE_NEW_STYLE_INIT_WARNING)
if (rec->is_constructor && !rec->is_new_style_constructor) {
const auto class_name
= detail::get_fully_qualified_tp_name((PyTypeObject *) rec->scope.ptr());
const auto func_name = std::string(rec->name);
PyErr_WarnEx(PyExc_FutureWarning,
("pybind11-bound class '" + class_name
+ "' is using an old-style "
"placement-new '"
+ func_name
+ "' which has been deprecated. See "
"the upgrade guide in pybind11's docs. This message is only visible "
"when compiled in debug mode.")
.c_str(),
0);
}
#endif
/* Generate a proper function signature */
std::string signature;
size_t type_index = 0, arg_index = 0;
bool is_starred = false;
for (const auto *pc = text; *pc != '\0'; ++pc) {
const auto c = *pc;
if (c == '{') {
// Write arg name for everything except *args and **kwargs.
is_starred = *(pc + 1) == '*';
if (is_starred) {
continue;
}
// Separator for keyword-only arguments, placed before the kw
// arguments start (unless we are already putting an *args)
if (!rec->has_args && arg_index == rec->nargs_pos) {
signature += "*, ";
}
if (arg_index < rec->args.size() && rec->args[arg_index].name) {
signature += rec->args[arg_index].name;
} else if (arg_index == 0 && rec->is_method) {
signature += "self";
} else {
signature += "arg" + std::to_string(arg_index - (rec->is_method ? 1 : 0));
}
signature += ": ";
} else if (c == '}') {
// Write default value if available.
if (!is_starred && arg_index < rec->args.size() && rec->args[arg_index].descr) {
signature += " = ";
signature += detail::replace_newlines_and_squash(rec->args[arg_index].descr);
}
// Separator for positional-only arguments (placed after the
// argument, rather than before like *)
if (rec->nargs_pos_only > 0 && (arg_index + 1) == rec->nargs_pos_only) {
signature += ", /";
}
if (!is_starred) {
arg_index++;
}
} else if (c == '%') {
const std::type_info *t = types[type_index++];
if (!t) {
pybind11_fail("Internal error while parsing type signature (1)");
}
if (auto *tinfo = detail::get_type_info(*t)) {
handle th((PyObject *) tinfo->type);
signature += th.attr("__module__").cast<std::string>() + "."
+ th.attr("__qualname__").cast<std::string>();
} else if (rec->is_new_style_constructor && arg_index == 0) {
// A new-style `__init__` takes `self` as `value_and_holder`.
// Rewrite it to the proper class type.
signature += rec->scope.attr("__module__").cast<std::string>() + "."
+ rec->scope.attr("__qualname__").cast<std::string>();
} else {
signature += detail::quote_cpp_type_name(detail::clean_type_id(t->name()));
}
} else {
signature += c;
}
}
if (arg_index != args - rec->has_args - rec->has_kwargs || types[type_index] != nullptr) {
pybind11_fail("Internal error while parsing type signature (2)");
}
rec->signature = guarded_strdup(signature.c_str());
rec->args.shrink_to_fit();
rec->nargs = (std::uint16_t) args;
if (rec->sibling && PYBIND11_INSTANCE_METHOD_CHECK(rec->sibling.ptr())) {
rec->sibling = PYBIND11_INSTANCE_METHOD_GET_FUNCTION(rec->sibling.ptr());
}
detail::function_record *chain = nullptr, *chain_start = rec;
if (rec->sibling) {
if (PyCFunction_Check(rec->sibling.ptr())) {
auto *self = PyCFunction_GET_SELF(rec->sibling.ptr());
if (!isinstance<capsule>(self)) {
chain = nullptr;
} else {
auto rec_capsule = reinterpret_borrow<capsule>(self);
if (detail::is_function_record_capsule(rec_capsule)) {
chain = rec_capsule.get_pointer<detail::function_record>();
/* Never append a method to an overload chain of a parent class;
instead, hide the parent's overloads in this case */
if (!chain->scope.is(rec->scope)) {
chain = nullptr;
}
} else {
chain = nullptr;
}
}
}
// Don't trigger for things like the default __init__, which are wrapper_descriptors
// that we are intentionally replacing
else if (!rec->sibling.is_none() && rec->name[0] != '_') {
pybind11_fail("Cannot overload existing non-function object \""
+ std::string(rec->name) + "\" with a function of the same name");
}
}
if (!chain) {
/* No existing overload was found, create a new function object */
rec->def = new PyMethodDef();
std::memset(rec->def, 0, sizeof(PyMethodDef));
rec->def->ml_name = rec->name;
rec->def->ml_meth
= reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)()>(dispatcher));
rec->def->ml_flags = METH_VARARGS | METH_KEYWORDS;
capsule rec_capsule(unique_rec.release(),
detail::get_function_record_capsule_name(),
[](void *ptr) { destruct((detail::function_record *) ptr); });
guarded_strdup.release();
object scope_module;
if (rec->scope) {
if (hasattr(rec->scope, "__module__")) {
scope_module = rec->scope.attr("__module__");
} else if (hasattr(rec->scope, "__name__")) {
scope_module = rec->scope.attr("__name__");
}
}
m_ptr = PyCFunction_NewEx(rec->def, rec_capsule.ptr(), scope_module.ptr());
if (!m_ptr) {
pybind11_fail("cpp_function::cpp_function(): Could not allocate function object");
}
} else {
/* Append at the beginning or end of the overload chain */
m_ptr = rec->sibling.ptr();
inc_ref();
if (chain->is_method != rec->is_method) {
pybind11_fail(
"overloading a method with both static and instance methods is not supported; "
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
"#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for more "
"details"
#else
"error while attempting to bind "
+ std::string(rec->is_method ? "instance" : "static") + " method "
+ std::string(pybind11::str(rec->scope.attr("__name__"))) + "."
+ std::string(rec->name) + signature
#endif
);
}
if (rec->prepend) {
// Beginning of chain; we need to replace the capsule's current head-of-the-chain
// pointer with this one, then make this one point to the previous head of the
// chain.
chain_start = rec;
rec->next = chain;
auto rec_capsule
= reinterpret_borrow<capsule>(((PyCFunctionObject *) m_ptr)->m_self);
rec_capsule.set_pointer(unique_rec.release());
guarded_strdup.release();
} else {
// Or end of chain (normal behavior)
chain_start = chain;
while (chain->next) {
chain = chain->next;
}
chain->next = unique_rec.release();
guarded_strdup.release();
}
}
std::string signatures;
int index = 0;
/* Create a nice pydoc rec including all signatures and
docstrings of the functions in the overload chain */
if (chain && options::show_function_signatures()
&& std::strcmp(rec->name, "_pybind11_conduit_v1_") != 0) {
// First a generic signature
signatures += rec->name;
signatures += "(*args, **kwargs)\n";
signatures += "Overloaded function.\n\n";
}
// Then specific overload signatures
bool first_user_def = true;
for (auto *it = chain_start; it != nullptr; it = it->next) {
if (options::show_function_signatures()
&& std::strcmp(rec->name, "_pybind11_conduit_v1_") != 0) {
if (index > 0) {
signatures += '\n';
}
if (chain) {
signatures += std::to_string(++index) + ". ";
}
signatures += rec->name;
signatures += it->signature;
signatures += '\n';
}
if (it->doc && it->doc[0] != '\0' && options::show_user_defined_docstrings()) {
// If we're appending another docstring, and aren't printing function signatures,
// we need to append a newline first:
if (!options::show_function_signatures()) {
if (first_user_def) {
first_user_def = false;
} else {
signatures += '\n';
}
}
if (options::show_function_signatures()) {
signatures += '\n';
}
signatures += it->doc;
if (options::show_function_signatures()) {
signatures += '\n';
}
}
}
/* Install docstring */
auto *func = (PyCFunctionObject *) m_ptr;
std::free(const_cast<char *>(func->m_ml->ml_doc));
// Install docstring if it's non-empty (when at least one option is enabled)
func->m_ml->ml_doc
= signatures.empty() ? nullptr : PYBIND11_COMPAT_STRDUP(signatures.c_str());
if (rec->is_method) {
m_ptr = PYBIND11_INSTANCE_METHOD_NEW(m_ptr, rec->scope.ptr());
if (!m_ptr) {
pybind11_fail(
"cpp_function::cpp_function(): Could not allocate instance method object");
}
Py_DECREF(func);
}
}
/// When a cpp_function is GCed, release any memory allocated by pybind11
static void destruct(detail::function_record *rec, bool free_strings = true) {
// If on Python 3.9, check the interpreter "MICRO" (patch) version.
// If this is running on 3.9.0, we have to work around a bug.
#if !defined(PYPY_VERSION) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 9
static bool is_zero = Py_GetVersion()[4] == '0';
#endif
while (rec) {
detail::function_record *next = rec->next;
if (rec->free_data) {
rec->free_data(rec);
}
// During initialization, these strings might not have been copied yet,
// so they cannot be freed. Once the function has been created, they can.
// Check `make_function_record` for more details.
if (free_strings) {
std::free((char *) rec->name);
std::free((char *) rec->doc);
std::free((char *) rec->signature);
for (auto &arg : rec->args) {
std::free(const_cast<char *>(arg.name));
std::free(const_cast<char *>(arg.descr));
}
}
for (auto &arg : rec->args) {
arg.value.dec_ref();
}
if (rec->def) {
std::free(const_cast<char *>(rec->def->ml_doc));
// Python 3.9.0 decref's rec->def in the wrong order; if loaded on 3.9.0,
// let it leak instead (using Python 3.9.1 at runtime fixes this)
// See https://github.com/python/cpython/pull/22670
#if !defined(PYPY_VERSION) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 9
if (!is_zero) {
delete rec->def;
}
#else
delete rec->def;
#endif
}
delete rec;
rec = next;
}
}
/// Main dispatch logic for calls to functions bound using pybind11
static PyObject *dispatcher(PyObject *self, PyObject *args_in, PyObject *kwargs_in) {
using namespace detail;
assert(isinstance<capsule>(self));
/* Iterator over the list of potentially admissible overloads */
const function_record *overloads = reinterpret_cast<function_record *>(
PyCapsule_GetPointer(self, get_function_record_capsule_name())),
*current_overload = overloads;
assert(overloads != nullptr);
/* Need to know how many arguments + keyword arguments there are to pick the right
overload */
const auto n_args_in = (size_t) PyTuple_GET_SIZE(args_in);
handle parent = n_args_in > 0 ? PyTuple_GET_ITEM(args_in, 0) : nullptr,
result = PYBIND11_TRY_NEXT_OVERLOAD;
auto self_value_and_holder = value_and_holder();
if (overloads->is_constructor) {
if (!parent
|| !PyObject_TypeCheck(parent.ptr(), (PyTypeObject *) overloads->scope.ptr())) {
set_error(PyExc_TypeError,
"__init__(self, ...) called with invalid or missing `self` argument");
return nullptr;
}
auto *const tinfo = get_type_info((PyTypeObject *) overloads->scope.ptr());
auto *const pi = reinterpret_cast<instance *>(parent.ptr());
self_value_and_holder = pi->get_value_and_holder(tinfo, true);
// If this value is already registered it must mean __init__ is invoked multiple times;
// we really can't support that in C++, so just ignore the second __init__.
if (self_value_and_holder.instance_registered()) {
return none().release().ptr();
}
}
try {
// We do this in two passes: in the first pass, we load arguments with `convert=false`;
// in the second, we allow conversion (except for arguments with an explicit
// py::arg().noconvert()). This lets us prefer calls without conversion, with
// conversion as a fallback.
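// For example (illustrative bindings): with m.def("g", [](int) { ... }) and
// m.def("g", [](double) { ... }), calling g(1) matches the int overload in
// this no-convert pass even if the double overload was registered first;
// g(1.5) fails to load as int without conversion and selects the double
// overload the same way.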
std::vector<function_call> second_pass;
// However, if there are no overloads, we can just skip the no-convert pass entirely
const bool overloaded
= current_overload != nullptr && current_overload->next != nullptr;
for (; current_overload != nullptr; current_overload = current_overload->next) {
/* For each overload:
1. Copy all positional arguments we were given, also checking to make sure that
named positional arguments weren't *also* specified via kwarg.
2. If we weren't given enough, try to make up the omitted ones by checking
whether they were provided by a kwarg matching the `py::arg("name")` name. If
so, use it (and remove it from kwargs); if not, see if the function binding
provided a default that we can use.
3. Ensure that either all keyword arguments were "consumed", or that the
function takes a kwargs argument to accept unconsumed kwargs.
4. Any positional arguments still left get put into a tuple (for args), and any
leftover kwargs get put into a dict.
5. Pack everything into a vector; if we have py::args or py::kwargs, they are an
extra tuple or dict at the end of the positional arguments.
6. Call the function call dispatcher (function_record::impl)
If one of these fail, move on to the next overload and keep trying until we get
a result other than PYBIND11_TRY_NEXT_OVERLOAD.
*/
const function_record &func = *current_overload;
size_t num_args = func.nargs; // Number of positional arguments that we need
if (func.has_args) {
--num_args; // (but don't count py::args
}
if (func.has_kwargs) {
--num_args; // or py::kwargs)
}
size_t pos_args = func.nargs_pos;
if (!func.has_args && n_args_in > pos_args) {
continue; // Too many positional arguments for this overload
}
if (n_args_in < pos_args && func.args.size() < pos_args) {
continue; // Not enough positional arguments given, and not enough defaults to
// fill in the blanks
}
function_call call(func, parent);
// Protect std::min with parentheses
size_t args_to_copy = (std::min)(pos_args, n_args_in);
size_t args_copied = 0;
// 0. Inject new-style `self` argument
if (func.is_new_style_constructor) {
// The `value` may have been preallocated by an old-style `__init__`
// if it was a preceding candidate for overload resolution.
if (self_value_and_holder) {
self_value_and_holder.type->dealloc(self_value_and_holder);
}
call.init_self = PyTuple_GET_ITEM(args_in, 0);
call.args.emplace_back(reinterpret_cast<PyObject *>(&self_value_and_holder));
call.args_convert.push_back(false);
++args_copied;
}
// 1. Copy any positional arguments given.
bool bad_arg = false;
for (; args_copied < args_to_copy; ++args_copied) {
const argument_record *arg_rec
= args_copied < func.args.size() ? &func.args[args_copied] : nullptr;
if (kwargs_in && arg_rec && arg_rec->name
&& dict_getitemstring(kwargs_in, arg_rec->name)) {
bad_arg = true;
break;
}
handle arg(PyTuple_GET_ITEM(args_in, args_copied));
if (arg_rec && !arg_rec->none && arg.is_none()) {
bad_arg = true;
break;
}
call.args.push_back(arg);
call.args_convert.push_back(arg_rec ? arg_rec->convert : true);
}
if (bad_arg) {
continue; // Maybe it was meant for another overload (issue #688)
}
// Keep track of how many positional args we copied out in case we need to come back
// to copy the rest into a py::args argument.
size_t positional_args_copied = args_copied;
// We'll need to copy this if we steal some kwargs for defaults
dict kwargs = reinterpret_borrow<dict>(kwargs_in);
// 1.5. Fill in any missing pos_only args from defaults if they exist
if (args_copied < func.nargs_pos_only) {
for (; args_copied < func.nargs_pos_only; ++args_copied) {
const auto &arg_rec = func.args[args_copied];
handle value;
if (arg_rec.value) {
value = arg_rec.value;
}
if (value) {
call.args.push_back(value);
call.args_convert.push_back(arg_rec.convert);
} else {
break;
}
}
if (args_copied < func.nargs_pos_only) {
continue; // Not enough defaults to fill the positional arguments
}
}
// 2. Check kwargs and, failing that, defaults that may help complete the list
if (args_copied < num_args) {
bool copied_kwargs = false;
for (; args_copied < num_args; ++args_copied) {
const auto &arg_rec = func.args[args_copied];
handle value;
if (kwargs_in && arg_rec.name) {
value = dict_getitemstring(kwargs.ptr(), arg_rec.name);
}
if (value) {
// Consume a kwargs value
if (!copied_kwargs) {
kwargs = reinterpret_steal<dict>(PyDict_Copy(kwargs.ptr()));
copied_kwargs = true;
}
if (PyDict_DelItemString(kwargs.ptr(), arg_rec.name) == -1) {
throw error_already_set();
}
} else if (arg_rec.value) {
value = arg_rec.value;
}
if (!arg_rec.none && value.is_none()) {
break;
}
if (value) {
// If we're at the py::args index then first insert a stub for it to be
// replaced later
if (func.has_args && call.args.size() == func.nargs_pos) {
call.args.push_back(none());
}
call.args.push_back(value);
call.args_convert.push_back(arg_rec.convert);
} else {
break;
}
}
if (args_copied < num_args) {
continue; // Not enough arguments, defaults, or kwargs to fill the
// positional arguments
}
}
// 3. Check everything was consumed (unless we have a kwargs arg)
if (kwargs && !kwargs.empty() && !func.has_kwargs) {
continue; // Unconsumed kwargs, but no py::kwargs argument to accept them
}
// 4a. If we have a py::args argument, create a new tuple with leftovers
if (func.has_args) {
tuple extra_args;
if (args_to_copy == 0) {
// We didn't copy out any positional arguments from the args_in tuple, so we
// can reuse it directly without copying:
extra_args = reinterpret_borrow<tuple>(args_in);
} else if (positional_args_copied >= n_args_in) {
extra_args = tuple(0);
} else {
size_t args_size = n_args_in - positional_args_copied;
extra_args = tuple(args_size);
for (size_t i = 0; i < args_size; ++i) {
extra_args[i] = PyTuple_GET_ITEM(args_in, positional_args_copied + i);
}
}
if (call.args.size() <= func.nargs_pos) {
call.args.push_back(extra_args);
} else {
call.args[func.nargs_pos] = extra_args;
}
call.args_convert.push_back(false);
call.args_ref = std::move(extra_args);
}
// 4b. If we have a py::kwargs, pass on any remaining kwargs
if (func.has_kwargs) {
if (!kwargs.ptr()) {
kwargs = dict(); // If we didn't get one, send an empty one
}
call.args.push_back(kwargs);
call.args_convert.push_back(false);
call.kwargs_ref = std::move(kwargs);
}
// 5. Put everything in a vector. Not technically step 5, we've been building it
// in `call.args` all along.
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
if (call.args.size() != func.nargs || call.args_convert.size() != func.nargs) {
pybind11_fail("Internal error: function call dispatcher inserted wrong number "
"of arguments!");
}
#endif
std::vector<bool> second_pass_convert;
if (overloaded) {
// We're in the first no-convert pass, so swap out the conversion flags for a
// set of all-false flags. If the call fails, we'll swap the flags back in for
// the conversion-allowed call below.
second_pass_convert.resize(func.nargs, false);
call.args_convert.swap(second_pass_convert);
}
// 6. Call the function.
try {
loader_life_support guard{};
result = func.impl(call);
} catch (reference_cast_error &) {
result = PYBIND11_TRY_NEXT_OVERLOAD;
}
if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) {
break;
}
if (overloaded) {
// The (overloaded) call failed; if the call has at least one argument that
// permits conversion (i.e. it hasn't been explicitly specified `.noconvert()`)
// then add this call to the list of second pass overloads to try.
for (size_t i = func.is_method ? 1 : 0; i < pos_args; i++) {
if (second_pass_convert[i]) {
// Found one: swap the converting flags back in and store the call for
// the second pass.
call.args_convert.swap(second_pass_convert);
second_pass.push_back(std::move(call));
break;
}
}
}
}
if (overloaded && !second_pass.empty() && result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) {
// The no-conversion pass finished without success, try again with conversion
// allowed
for (auto &call : second_pass) {
try {
loader_life_support guard{};
result = call.func.impl(call);
} catch (reference_cast_error &) {
result = PYBIND11_TRY_NEXT_OVERLOAD;
}
if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) {
// The error reporting logic below expects 'current_overload' to be valid,
// as it would be if we'd encountered this failure in the first-pass loop.
if (!result) {
current_overload = &call.func;
}
break;
}
}
}
} catch (error_already_set &e) {
e.restore();
return nullptr;
#ifdef __GLIBCXX__
} catch (abi::__forced_unwind &) {
throw;
#endif
} catch (...) {
try_translate_exceptions();
return nullptr;
}
auto append_note_if_missing_header_is_suspected = [](std::string &msg) {
if (msg.find("std::") != std::string::npos) {
msg += "\n\n"
"Did you forget to `#include <pybind11/stl.h>`? Or <pybind11/complex.h>,\n"
"<pybind11/functional.h>, <pybind11/chrono.h>, etc. Some automatic\n"
"conversions are optional and require extra headers to be included\n"
"when compiling your pybind11 module.";
}
};
if (result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) {
if (overloads->is_operator) {
return handle(Py_NotImplemented).inc_ref().ptr();
}
std::string msg = std::string(overloads->name) + "(): incompatible "
+ std::string(overloads->is_constructor ? "constructor" : "function")
+ " arguments. The following argument types are supported:\n";
int ctr = 0;
for (const function_record *it2 = overloads; it2 != nullptr; it2 = it2->next) {
msg += " " + std::to_string(++ctr) + ". ";
bool wrote_sig = false;
if (overloads->is_constructor) {
// For a constructor, rewrite `(self: Object, arg0, ...) -> NoneType` as
// `Object(arg0, ...)`
std::string sig = it2->signature;
size_t start = sig.find('(') + 7; // skip "(self: "
if (start < sig.size()) {
// End at the , for the next argument
size_t end = sig.find(", "), next = end + 2;
size_t ret = sig.rfind(" -> ");
// Or the ), if there is no comma:
if (end >= sig.size()) {
next = end = sig.find(')');
}
if (start < end && next < sig.size()) {
msg.append(sig, start, end - start);
msg += '(';
msg.append(sig, next, ret - next);
wrote_sig = true;
}
}
}
if (!wrote_sig) {
msg += it2->signature;
}
msg += '\n';
}
msg += "\nInvoked with: ";
auto args_ = reinterpret_borrow<tuple>(args_in);
bool some_args = false;
for (size_t ti = overloads->is_constructor ? 1 : 0; ti < args_.size(); ++ti) {
if (!some_args) {
some_args = true;
} else {
msg += ", ";
}
try {
msg += pybind11::repr(args_[ti]);
} catch (const error_already_set &) {
msg += "<repr raised Error>";
}
}
if (kwargs_in) {
auto kwargs = reinterpret_borrow<dict>(kwargs_in);
if (!kwargs.empty()) {
if (some_args) {
msg += "; ";
}
msg += "kwargs: ";
bool first = true;
for (const auto &kwarg : kwargs) {
if (first) {
first = false;
} else {
msg += ", ";
}
msg += pybind11::str("{}=").format(kwarg.first);
try {
msg += pybind11::repr(kwarg.second);
} catch (const error_already_set &) {
msg += "<repr raised Error>";
}
}
}
}
append_note_if_missing_header_is_suspected(msg);
// Attach additional error info to the exception if supported
if (PyErr_Occurred()) {
// #HelpAppreciated: unit test coverage for this branch.
raise_from(PyExc_TypeError, msg.c_str());
return nullptr;
}
set_error(PyExc_TypeError, msg.c_str());
return nullptr;
}
if (!result) {
std::string msg = "Unable to convert function return value to a "
"Python type! The signature was\n\t";
assert(current_overload != nullptr);
msg += current_overload->signature;
append_note_if_missing_header_is_suspected(msg);
// Attach additional error info to the exception if supported
if (PyErr_Occurred()) {
raise_from(PyExc_TypeError, msg.c_str());
return nullptr;
}
set_error(PyExc_TypeError, msg.c_str());
return nullptr;
}
if (overloads->is_constructor && !self_value_and_holder.holder_constructed()) {
auto *pi = reinterpret_cast<instance *>(parent.ptr());
self_value_and_holder.type->init_instance(pi, nullptr);
}
return result.ptr();
}
};
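// Usage sketch (illustrative): cpp_function can also be used directly,
// independent of module bindings, e.g.
//     py::cpp_function inc([](int x) { return x + 1; });
//     py::object r = inc(41);   // r wraps the Python int 42
// module_::def below constructs exactly such an object and attaches it to
// the module under the given name.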
PYBIND11_NAMESPACE_BEGIN(detail)
template <>
struct handle_type_name<cpp_function> {
static constexpr auto name = const_name("Callable");
};
PYBIND11_NAMESPACE_END(detail)
// Use to activate Py_MOD_GIL_NOT_USED.
class mod_gil_not_used {
public:
explicit mod_gil_not_used(bool flag = true) : flag_(flag) {}
bool flag() const { return flag_; }
private:
bool flag_;
};
/// Wrapper for Python extension modules
class module_ : public object {
public:
PYBIND11_OBJECT_DEFAULT(module_, object, PyModule_Check)
/// Create a new top-level Python module with the given name and docstring
PYBIND11_DEPRECATED("Use PYBIND11_MODULE or module_::create_extension_module instead")
explicit module_(const char *name, const char *doc = nullptr) {
*this = create_extension_module(name, doc, new PyModuleDef());
}
/** \rst
Create Python binding for a new function within the module scope. ``Func``
can be a plain C++ function, a function pointer, or a lambda function. For
details on the ``Extra&& ... extra`` argument, see section :ref:`extras`.
\endrst */
template <typename Func, typename... Extra>
module_ &def(const char *name_, Func &&f, const Extra &...extra) {
cpp_function func(std::forward<Func>(f),
name(name_),
scope(*this),
sibling(getattr(*this, name_, none())),
extra...);
// NB: allow overwriting here because cpp_function sets up a chain with the intention of
// overwriting (and has already checked internally that it isn't overwriting
// non-functions).
add_object(name_, func, true /* overwrite */);
return *this;
}
/** \rst
Create and return a new Python submodule with the given name and docstring.
This also works recursively, i.e.
.. code-block:: cpp
py::module_ m("example", "pybind11 example plugin");
py::module_ m2 = m.def_submodule("sub", "A submodule of 'example'");
py::module_ m3 = m2.def_submodule("subsub", "A submodule of 'example.sub'");
\endrst */
module_ def_submodule(const char *name, const char *doc = nullptr) {
const char *this_name = PyModule_GetName(m_ptr);
if (this_name == nullptr) {
throw error_already_set();
}
std::string full_name = std::string(this_name) + '.' + name;
handle submodule = PyImport_AddModule(full_name.c_str());
if (!submodule) {
throw error_already_set();
}
auto result = reinterpret_borrow<module_>(submodule);
if (doc && options::show_user_defined_docstrings()) {
result.attr("__doc__") = pybind11::str(doc);
}
attr(name) = result;
return result;
}
/// Import and return a module, or throw `error_already_set`.
static module_ import(const char *name) {
PyObject *obj = PyImport_ImportModule(name);
if (!obj) {
throw error_already_set();
}
return reinterpret_steal<module_>(obj);
}
/// Reload the module, or throw `error_already_set`.
void reload() {
PyObject *obj = PyImport_ReloadModule(ptr());
if (!obj) {
throw error_already_set();
}
*this = reinterpret_steal<module_>(obj);
}
/** \rst
Adds an object to the module using the given name. Throws if an object with the given name
already exists.
``overwrite`` should almost always be false: attempting to overwrite objects that pybind11
has established will, in most cases, break things.
\endrst */
PYBIND11_NOINLINE void add_object(const char *name, handle obj, bool overwrite = false) {
if (!overwrite && hasattr(*this, name)) {
pybind11_fail(
"Error during initialization: multiple incompatible definitions with name \""
+ std::string(name) + "\"");
}
PyModule_AddObject(ptr(), name, obj.inc_ref().ptr() /* steals a reference */);
}
using module_def = PyModuleDef; // TODO: Can this be removed (it was needed only for Python 2)?
/** \rst
Create a new top-level module that can be used as the main module of a C extension.
``def`` should point to a statically allocated module_def.
\endrst */
static module_ create_extension_module(const char *name,
const char *doc,
module_def *def,
mod_gil_not_used gil_not_used
= mod_gil_not_used(false)) {
// module_def is PyModuleDef
// Placement new (not an allocation).
def = new (def)
PyModuleDef{/* m_base */ PyModuleDef_HEAD_INIT,
/* m_name */ name,
/* m_doc */ options::show_user_defined_docstrings() ? doc : nullptr,
/* m_size */ -1,
/* m_methods */ nullptr,
/* m_slots */ nullptr,
/* m_traverse */ nullptr,
/* m_clear */ nullptr,
/* m_free */ nullptr};
auto *m = PyModule_Create(def);
if (m == nullptr) {
if (PyErr_Occurred()) {
throw error_already_set();
}
pybind11_fail("Internal error in module_::create_extension_module()");
}
if (gil_not_used.flag()) {
#ifdef Py_GIL_DISABLED
PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED);
#endif
}
// TODO: Should be reinterpret_steal for Python 3, but Python also steals it again when
// returned from PyInit_...
// For Python 2, reinterpret_borrow was correct.
return reinterpret_borrow<module_>(m);
}
};
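// Usage sketch (illustrative names): typical use of module_ is via the
// PYBIND11_MODULE entry point, e.g.
//     PYBIND11_MODULE(example, m) {
//         m.def("add", [](int a, int b) { return a + b; }, "Add two integers.");
//         py::module_ helpers = m.def_submodule("helpers", "Internal helpers");
//         helpers.attr("answer") = 42;
//     }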
PYBIND11_NAMESPACE_BEGIN(detail)
template <>
struct handle_type_name<module_> {
static constexpr auto name = const_name("module");
};
PYBIND11_NAMESPACE_END(detail)
// When inside a namespace (or anywhere as long as it's not the first item on a line),
// C++20 allows "module" to be used. This is provided for backward compatibility, and for
// simplicity, if someone wants to use py::module for example, that is perfectly safe.
using module = module_;
/// \ingroup python_builtins
/// Return a dictionary representing the global variables in the current execution frame,
/// or ``__main__.__dict__`` if there is no frame (usually when the interpreter is embedded).
inline dict globals() {
#if PY_VERSION_HEX >= 0x030d0000
PyObject *p = PyEval_GetFrameGlobals();
return p ? reinterpret_steal<dict>(p)
: reinterpret_borrow<dict>(module_::import("__main__").attr("__dict__").ptr());
#else
PyObject *p = PyEval_GetGlobals();
return reinterpret_borrow<dict>(p ? p : module_::import("__main__").attr("__dict__").ptr());
#endif
}
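// For example, the returned dict is live, so
//     py::globals()["answer"] = 42;
// makes `answer` visible as a global in the current execution frame (or in
// __main__ when the interpreter is embedded and no frame is active).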
template <typename... Args, typename = detail::enable_if_t<args_are_all_keyword_or_ds<Args...>()>>
PYBIND11_DEPRECATED("make_simple_namespace should be replaced with "
"py::module_::import(\"types\").attr(\"SimpleNamespace\") ")
object make_simple_namespace(Args &&...args_) {
return module_::import("types").attr("SimpleNamespace")(std::forward<Args>(args_)...);
}
PYBIND11_NAMESPACE_BEGIN(detail)
/// Generic support for creating new Python heap types
class generic_type : public object {
public:
PYBIND11_OBJECT_DEFAULT(generic_type, object, PyType_Check)
protected:
void initialize(const type_record &rec) {
if (rec.scope && hasattr(rec.scope, "__dict__")
&& rec.scope.attr("__dict__").contains(rec.name)) {
pybind11_fail("generic_type: cannot initialize type \"" + std::string(rec.name)
+ "\": an object with that name is already defined");
}
if ((rec.module_local ? get_local_type_info(*rec.type) : get_global_type_info(*rec.type))
!= nullptr) {
pybind11_fail("generic_type: type \"" + std::string(rec.name)
+ "\" is already registered!");
}
m_ptr = make_new_python_type(rec);
/* Register supplemental type information in C++ dict */
auto *tinfo = new detail::type_info();
tinfo->type = (PyTypeObject *) m_ptr;
tinfo->cpptype = rec.type;
tinfo->type_size = rec.type_size;
tinfo->type_align = rec.type_align;
tinfo->operator_new = rec.operator_new;
tinfo->holder_size_in_ptrs = size_in_ptrs(rec.holder_size);
tinfo->init_instance = rec.init_instance;
tinfo->dealloc = rec.dealloc;
tinfo->simple_type = true;
tinfo->simple_ancestors = true;
tinfo->default_holder = rec.default_holder;
tinfo->module_local = rec.module_local;
with_internals([&](internals &internals) {
auto tindex = std::type_index(*rec.type);
tinfo->direct_conversions = &internals.direct_conversions[tindex];
if (rec.module_local) {
get_local_internals().registered_types_cpp[tindex] = tinfo;
} else {
internals.registered_types_cpp[tindex] = tinfo;
}
internals.registered_types_py[(PyTypeObject *) m_ptr] = {tinfo};
});
if (rec.bases.size() > 1 || rec.multiple_inheritance) {
mark_parents_nonsimple(tinfo->type);
tinfo->simple_ancestors = false;
} else if (rec.bases.size() == 1) {
auto *parent_tinfo = get_type_info((PyTypeObject *) rec.bases[0].ptr());
assert(parent_tinfo != nullptr);
bool parent_simple_ancestors = parent_tinfo->simple_ancestors;
tinfo->simple_ancestors = parent_simple_ancestors;
// The parent can no longer be a simple type if it has MI and has a child
parent_tinfo->simple_type = parent_tinfo->simple_type && parent_simple_ancestors;
}
if (rec.module_local) {
// Stash the local typeinfo and loader so that external modules can access it.
tinfo->module_local_load = &type_caster_generic::local_load;
setattr(m_ptr, PYBIND11_MODULE_LOCAL_ID, capsule(tinfo));
}
}
/// Helper function which tags all parents of a type using mult. inheritance
void mark_parents_nonsimple(PyTypeObject *value) {
auto t = reinterpret_borrow<tuple>(value->tp_bases);
for (handle h : t) {
auto *tinfo2 = get_type_info((PyTypeObject *) h.ptr());
if (tinfo2) {
tinfo2->simple_type = false;
}
mark_parents_nonsimple((PyTypeObject *) h.ptr());
}
}
void install_buffer_funcs(buffer_info *(*get_buffer)(PyObject *, void *),
void *get_buffer_data) {
auto *type = (PyHeapTypeObject *) m_ptr;
auto *tinfo = detail::get_type_info(&type->ht_type);
if (!type->ht_type.tp_as_buffer) {
pybind11_fail("To be able to register buffer protocol support for the type '"
+ get_fully_qualified_tp_name(tinfo->type)
+ "' the associated class<>(..) invocation must "
"include the pybind11::buffer_protocol() annotation!");
}
tinfo->get_buffer = get_buffer;
tinfo->get_buffer_data = get_buffer_data;
}
// rec_func must be set for either fget or fset.
void def_property_static_impl(const char *name,
handle fget,
handle fset,
detail::function_record *rec_func) {
const auto is_static = (rec_func != nullptr) && !(rec_func->is_method && rec_func->scope);
const auto has_doc = (rec_func != nullptr) && (rec_func->doc != nullptr)
&& pybind11::options::show_user_defined_docstrings();
auto property = handle(
(PyObject *) (is_static ? get_internals().static_property_type : &PyProperty_Type));
attr(name) = property(fget.ptr() ? fget : none(),
fset.ptr() ? fset : none(),
/*deleter*/ none(),
pybind11::str(has_doc ? rec_func->doc : ""));
}
};
/// Set the pointer to operator new if it exists. The cast is needed because it can be overloaded.
template <typename T,
typename = void_t<decltype(static_cast<void *(*) (size_t)>(T::operator new))>>
void set_operator_new(type_record *r) {
r->operator_new = &T::operator new;
}
template <typename>
void set_operator_new(...) {}
template <typename T, typename SFINAE = void>
struct has_operator_delete : std::false_type {};
template <typename T>
struct has_operator_delete<T, void_t<decltype(static_cast<void (*)(void *)>(T::operator delete))>>
: std::true_type {};
template <typename T, typename SFINAE = void>
struct has_operator_delete_size : std::false_type {};
template <typename T>
struct has_operator_delete_size<
T,
void_t<decltype(static_cast<void (*)(void *, size_t)>(T::operator delete))>> : std::true_type {
};
/// Call class-specific delete if it exists or global otherwise. Can also be an overload set.
template <typename T, enable_if_t<has_operator_delete<T>::value, int> = 0>
void call_operator_delete(T *p, size_t, size_t) {
T::operator delete(p);
}
template <typename T,
enable_if_t<!has_operator_delete<T>::value && has_operator_delete_size<T>::value, int>
= 0>
void call_operator_delete(T *p, size_t s, size_t) {
T::operator delete(p, s);
}
inline void call_operator_delete(void *p, size_t s, size_t a) {
(void) s;
(void) a;
#if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
if (a > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
# ifdef __cpp_sized_deallocation
::operator delete(p, s, std::align_val_t(a));
# else
::operator delete(p, std::align_val_t(a));
# endif
return;
}
#endif
#ifdef __cpp_sized_deallocation
::operator delete(p, s);
#else
::operator delete(p);
#endif
}
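// Illustration (hypothetical type, not part of pybind11): a bound class with
// its own `operator delete` is freed through that overload via the dispatch
// above; all other types fall through to the global (optionally sized and
// aligned) `::operator delete`.
//
//     struct PoolAllocated {
//         static void *operator new(size_t n);   // matched by set_operator_new
//         static void operator delete(void *p);  // matched by has_operator_delete
//     };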
inline void add_class_method(object &cls, const char *name_, const cpp_function &cf) {
cls.attr(cf.name()) = cf;
if (std::strcmp(name_, "__eq__") == 0 && !cls.attr("__dict__").contains("__hash__")) {
cls.attr("__hash__") = none();
}
}
PYBIND11_NAMESPACE_END(detail)
/// Given a pointer to a member function, cast it to its `Derived` version.
/// Forward everything else unchanged.
template <typename /*Derived*/, typename F>
auto method_adaptor(F &&f) -> decltype(std::forward<F>(f)) {
return std::forward<F>(f);
}
template <typename Derived, typename Return, typename Class, typename... Args>
auto method_adaptor(Return (Class::*pmf)(Args...)) -> Return (Derived::*)(Args...) {
static_assert(
detail::is_accessible_base_of<Class, Derived>::value,
"Cannot bind an inaccessible base class method; use a lambda definition instead");
return pmf;
}
template <typename Derived, typename Return, typename Class, typename... Args>
auto method_adaptor(Return (Class::*pmf)(Args...) const) -> Return (Derived::*)(Args...) const {
static_assert(
detail::is_accessible_base_of<Class, Derived>::value,
"Cannot bind an inaccessible base class method; use a lambda definition instead");
return pmf;
}
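// Usage sketch (hypothetical Base/Derived types; `m` is a module handle):
// class_::def() applies method_adaptor implicitly, so an accessible
// base-class member function pointer can be bound on the derived class:
//
//     struct Base    { int f() const { return 1; } };
//     struct Derived : Base {};
//     py::class_<Derived>(m, "Derived")
//         .def("f", &Derived::f);  // int (Base::*)() const is adapted to Derived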
template <typename type_, typename... options>
class class_ : public detail::generic_type {
template <typename T>
using is_holder = detail::is_holder_type<type_, T>;
template <typename T>
using is_subtype = detail::is_strict_base_of<type_, T>;
template <typename T>
using is_base = detail::is_strict_base_of<T, type_>;
// struct instead of using here to help MSVC:
template <typename T>
struct is_valid_class_option : detail::any_of<is_holder<T>, is_subtype<T>, is_base<T>> {};
public:
using type = type_;
using type_alias = detail::exactly_one_t<is_subtype, void, options...>;
constexpr static bool has_alias = !std::is_void<type_alias>::value;
using holder_type = detail::exactly_one_t<is_holder, std::unique_ptr<type>, options...>;
static_assert(detail::all_of<is_valid_class_option<options>...>::value,
"Unknown/invalid class_ template parameters provided");
static_assert(!has_alias || std::is_polymorphic<type>::value,
"Cannot use an alias class with a non-polymorphic type");
PYBIND11_OBJECT(class_, generic_type, PyType_Check)
template <typename... Extra>
class_(handle scope, const char *name, const Extra &...extra) {
using namespace detail;
// MI can only be specified via class_ template options, not constructor parameters
static_assert(
none_of<is_pyobject<Extra>...>::value || // no base class arguments, or:
(constexpr_sum(is_pyobject<Extra>::value...) == 1 && // Exactly one base
constexpr_sum(is_base<options>::value...) == 0 && // no template option bases
// no multiple_inheritance attr
none_of<std::is_same<multiple_inheritance, Extra>...>::value),
"Error: multiple inheritance bases must be specified via class_ template options");
type_record record;
record.scope = scope;
record.name = name;
record.type = &typeid(type);
record.type_size = sizeof(conditional_t<has_alias, type_alias, type>);
record.type_align = alignof(conditional_t<has_alias, type_alias, type> &);
record.holder_size = sizeof(holder_type);
record.init_instance = init_instance;
record.dealloc = dealloc;
record.default_holder = detail::is_instantiation<std::unique_ptr, holder_type>::value;
set_operator_new<type>(&record);
/* Register base classes specified via template arguments to class_, if any */
PYBIND11_EXPAND_SIDE_EFFECTS(add_base<options>(record));
/* Process optional arguments, if any */
process_attributes<Extra...>::init(extra..., &record);
generic_type::initialize(record);
if (has_alias) {
with_internals([&](internals &internals) {
auto &instances = record.module_local ? get_local_internals().registered_types_cpp
: internals.registered_types_cpp;
instances[std::type_index(typeid(type_alias))]
= instances[std::type_index(typeid(type))];
});
}
def("_pybind11_conduit_v1_", cpp_conduit_method);
}
template <typename Base, detail::enable_if_t<is_base<Base>::value, int> = 0>
static void add_base(detail::type_record &rec) {
rec.add_base(typeid(Base), [](void *src) -> void * {
return static_cast<Base *>(reinterpret_cast<type *>(src));
});
}
template <typename Base, detail::enable_if_t<!is_base<Base>::value, int> = 0>
static void add_base(detail::type_record &) {}
template <typename Func, typename... Extra>
class_ &def(const char *name_, Func &&f, const Extra &...extra) {
cpp_function cf(method_adaptor<type>(std::forward<Func>(f)),
name(name_),
is_method(*this),
sibling(getattr(*this, name_, none())),
extra...);
add_class_method(*this, name_, cf);
return *this;
}
template <typename Func, typename... Extra>
class_ &def_static(const char *name_, Func &&f, const Extra &...extra) {
static_assert(!std::is_member_function_pointer<Func>::value,
"def_static(...) called with a non-static member function pointer");
cpp_function cf(std::forward<Func>(f),
name(name_),
scope(*this),
sibling(getattr(*this, name_, none())),
extra...);
auto cf_name = cf.name();
attr(std::move(cf_name)) = staticmethod(std::move(cf));
return *this;
}
template <typename T, typename... Extra, detail::enable_if_t<T::op_enable_if_hook, int> = 0>
class_ &def(const T &op, const Extra &...extra) {
op.execute(*this, extra...);
return *this;
}
template <typename T, typename... Extra, detail::enable_if_t<T::op_enable_if_hook, int> = 0>
class_ &def_cast(const T &op, const Extra &...extra) {
op.execute_cast(*this, extra...);
return *this;
}
template <typename... Args, typename... Extra>
class_ &def(const detail::initimpl::constructor<Args...> &init, const Extra &...extra) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(init);
init.execute(*this, extra...);
return *this;
}
template <typename... Args, typename... Extra>
class_ &def(const detail::initimpl::alias_constructor<Args...> &init, const Extra &...extra) {
PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(init);
init.execute(*this, extra...);
return *this;
}
template <typename... Args, typename... Extra>
class_ &def(detail::initimpl::factory<Args...> &&init, const Extra &...extra) {
std::move(init).execute(*this, extra...);
return *this;
}
template <typename... Args, typename... Extra>
class_ &def(detail::initimpl::pickle_factory<Args...> &&pf, const Extra &...extra) {
std::move(pf).execute(*this, extra...);
return *this;
}
template <typename Func>
class_ &def_buffer(Func &&func) {
struct capture {
Func func;
};
auto *ptr = new capture{std::forward<Func>(func)};
install_buffer_funcs(
[](PyObject *obj, void *ptr) -> buffer_info * {
detail::make_caster<type> caster;
if (!caster.load(obj, false)) {
return nullptr;
}
return new buffer_info(((capture *) ptr)->func(std::move(caster)));
},
ptr);
weakref(m_ptr, cpp_function([ptr](handle wr) {
delete ptr;
wr.dec_ref();
}))
.release();
return *this;
}
template <typename Return, typename Class, typename... Args>
class_ &def_buffer(Return (Class::*func)(Args...)) {
return def_buffer([func](type &obj) { return (obj.*func)(); });
}
template <typename Return, typename Class, typename... Args>
class_ &def_buffer(Return (Class::*func)(Args...) const) {
return def_buffer([func](const type &obj) { return (obj.*func)(); });
}
template <typename C, typename D, typename... Extra>
class_ &def_readwrite(const char *name, D C::*pm, const Extra &...extra) {
static_assert(std::is_same<C, type>::value || std::is_base_of<C, type>::value,
"def_readwrite() requires a class member (or base class member)");
cpp_function fget([pm](const type &c) -> const D & { return c.*pm; }, is_method(*this)),
fset([pm](type &c, const D &value) { c.*pm = value; }, is_method(*this));
def_property(name, fget, fset, return_value_policy::reference_internal, extra...);
return *this;
}
template <typename C, typename D, typename... Extra>
class_ &def_readonly(const char *name, const D C::*pm, const Extra &...extra) {
static_assert(std::is_same<C, type>::value || std::is_base_of<C, type>::value,
"def_readonly() requires a class member (or base class member)");
cpp_function fget([pm](const type &c) -> const D & { return c.*pm; }, is_method(*this));
def_property_readonly(name, fget, return_value_policy::reference_internal, extra...);
return *this;
}
template <typename D, typename... Extra>
class_ &def_readwrite_static(const char *name, D *pm, const Extra &...extra) {
cpp_function fget([pm](const object &) -> const D & { return *pm; }, scope(*this)),
fset([pm](const object &, const D &value) { *pm = value; }, scope(*this));
def_property_static(name, fget, fset, return_value_policy::reference, extra...);
return *this;
}
template <typename D, typename... Extra>
class_ &def_readonly_static(const char *name, const D *pm, const Extra &...extra) {
cpp_function fget([pm](const object &) -> const D & { return *pm; }, scope(*this));
def_property_readonly_static(name, fget, return_value_policy::reference, extra...);
return *this;
}
/// Uses return_value_policy::reference_internal by default
template <typename Getter, typename... Extra>
class_ &def_property_readonly(const char *name, const Getter &fget, const Extra &...extra) {
return def_property_readonly(name,
cpp_function(method_adaptor<type>(fget)),
return_value_policy::reference_internal,
extra...);
}
/// Uses cpp_function's return_value_policy by default
template <typename... Extra>
class_ &
def_property_readonly(const char *name, const cpp_function &fget, const Extra &...extra) {
return def_property(name, fget, nullptr, extra...);
}
/// Uses return_value_policy::reference by default
template <typename Getter, typename... Extra>
class_ &
def_property_readonly_static(const char *name, const Getter &fget, const Extra &...extra) {
return def_property_readonly_static(
name, cpp_function(fget), return_value_policy::reference, extra...);
}
/// Uses cpp_function's return_value_policy by default
template <typename... Extra>
class_ &def_property_readonly_static(const char *name,
const cpp_function &fget,
const Extra &...extra) {
return def_property_static(name, fget, nullptr, extra...);
}
/// Uses return_value_policy::reference_internal by default
template <typename Getter, typename Setter, typename... Extra>
class_ &
def_property(const char *name, const Getter &fget, const Setter &fset, const Extra &...extra) {
return def_property(
name, fget, cpp_function(method_adaptor<type>(fset), is_setter()), extra...);
}
template <typename Getter, typename... Extra>
class_ &def_property(const char *name,
const Getter &fget,
const cpp_function &fset,
const Extra &...extra) {
return def_property(name,
cpp_function(method_adaptor<type>(fget)),
fset,
return_value_policy::reference_internal,
extra...);
}
/// Uses cpp_function's return_value_policy by default
template <typename... Extra>
class_ &def_property(const char *name,
const cpp_function &fget,
const cpp_function &fset,
const Extra &...extra) {
return def_property_static(name, fget, fset, is_method(*this), extra...);
}
/// Uses return_value_policy::reference by default
template <typename Getter, typename... Extra>
class_ &def_property_static(const char *name,
const Getter &fget,
const cpp_function &fset,
const Extra &...extra) {
return def_property_static(
name, cpp_function(fget), fset, return_value_policy::reference, extra...);
}
/// Uses cpp_function's return_value_policy by default
template <typename... Extra>
class_ &def_property_static(const char *name,
const cpp_function &fget,
const cpp_function &fset,
const Extra &...extra) {
static_assert(0 == detail::constexpr_sum(std::is_base_of<arg, Extra>::value...),
"Argument annotations are not allowed for properties");
auto rec_fget = get_function_record(fget), rec_fset = get_function_record(fset);
auto *rec_active = rec_fget;
if (rec_fget) {
char *doc_prev = rec_fget->doc; /* 'extra' field may include a property-specific
documentation string */
detail::process_attributes<Extra...>::init(extra..., rec_fget);
if (rec_fget->doc && rec_fget->doc != doc_prev) {
std::free(doc_prev);
rec_fget->doc = PYBIND11_COMPAT_STRDUP(rec_fget->doc);
}
}
if (rec_fset) {
char *doc_prev = rec_fset->doc;
detail::process_attributes<Extra...>::init(extra..., rec_fset);
if (rec_fset->doc && rec_fset->doc != doc_prev) {
std::free(doc_prev);
rec_fset->doc = PYBIND11_COMPAT_STRDUP(rec_fset->doc);
}
if (!rec_active) {
rec_active = rec_fset;
}
}
def_property_static_impl(name, fget, fset, rec_active);
return *this;
}
private:
/// Initialize holder object, variant 1: object derives from enable_shared_from_this
template <typename T>
static void init_holder(detail::instance *inst,
detail::value_and_holder &v_h,
const holder_type * /* unused */,
const std::enable_shared_from_this<T> * /* dummy */) {
auto sh = std::dynamic_pointer_cast<typename holder_type::element_type>(
detail::try_get_shared_from_this(v_h.value_ptr<type>()));
if (sh) {
new (std::addressof(v_h.holder<holder_type>())) holder_type(std::move(sh));
v_h.set_holder_constructed();
}
if (!v_h.holder_constructed() && inst->owned) {
new (std::addressof(v_h.holder<holder_type>())) holder_type(v_h.value_ptr<type>());
v_h.set_holder_constructed();
}
}
static void init_holder_from_existing(const detail::value_and_holder &v_h,
const holder_type *holder_ptr,
std::true_type /*is_copy_constructible*/) {
new (std::addressof(v_h.holder<holder_type>()))
holder_type(*reinterpret_cast<const holder_type *>(holder_ptr));
}
static void init_holder_from_existing(const detail::value_and_holder &v_h,
const holder_type *holder_ptr,
std::false_type /*is_copy_constructible*/) {
new (std::addressof(v_h.holder<holder_type>()))
holder_type(std::move(*const_cast<holder_type *>(holder_ptr)));
}
/// Initialize holder object, variant 2: try to construct from existing holder object, if
/// possible
static void init_holder(detail::instance *inst,
detail::value_and_holder &v_h,
const holder_type *holder_ptr,
                            const void * /* dummy -- not enable_shared_from_this<T> */) {
if (holder_ptr) {
init_holder_from_existing(v_h, holder_ptr, std::is_copy_constructible<holder_type>());
v_h.set_holder_constructed();
} else if (detail::always_construct_holder<holder_type>::value || inst->owned) {
new (std::addressof(v_h.holder<holder_type>())) holder_type(v_h.value_ptr<type>());
v_h.set_holder_constructed();
}
}
/// Performs instance initialization including constructing a holder and registering the known
/// instance. Should be called as soon as the `type` value_ptr is set for an instance. Takes
/// an optional pointer to an existing holder to use; if not specified and the instance is
/// `.owned`, a new holder will be constructed to manage the value pointer.
static void init_instance(detail::instance *inst, const void *holder_ptr) {
auto v_h = inst->get_value_and_holder(detail::get_type_info(typeid(type)));
if (!v_h.instance_registered()) {
register_instance(inst, v_h.value_ptr(), v_h.type);
v_h.set_instance_registered();
}
init_holder(inst, v_h, (const holder_type *) holder_ptr, v_h.value_ptr<type>());
}
/// Deallocates an instance; via holder, if constructed; otherwise via operator delete.
static void dealloc(detail::value_and_holder &v_h) {
// We could be deallocating because we are cleaning up after a Python exception.
// If so, the Python error indicator will be set. We need to clear that before
// running the destructor, in case the destructor code calls more Python.
// If we don't, the Python API will exit with an exception, and pybind11 will
// throw error_already_set from the C++ destructor which is forbidden and triggers
// std::terminate().
error_scope scope;
if (v_h.holder_constructed()) {
v_h.holder<holder_type>().~holder_type();
v_h.set_holder_constructed(false);
} else {
detail::call_operator_delete(
v_h.value_ptr<type>(), v_h.type->type_size, v_h.type->type_align);
}
v_h.value_ptr() = nullptr;
}
static detail::function_record *get_function_record(handle h) {
h = detail::get_function(h);
if (!h) {
return nullptr;
}
handle func_self = PyCFunction_GET_SELF(h.ptr());
if (!func_self) {
throw error_already_set();
}
if (!isinstance<capsule>(func_self)) {
return nullptr;
}
auto cap = reinterpret_borrow<capsule>(func_self);
if (!detail::is_function_record_capsule(cap)) {
return nullptr;
}
return cap.get_pointer<detail::function_record>();
}
};
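// End-to-end usage sketch (hypothetical `Pet` type; `m` is the py::module_
// handle inside a PYBIND11_MODULE body):
//
//     struct Pet {
//         explicit Pet(std::string name_) : name(std::move(name_)) {}
//         std::string name;
//     };
//     py::class_<Pet>(m, "Pet")
//         .def(py::init<std::string>(), py::arg("name"))
//         .def_readwrite("name", &Pet::name)
//         .def("__repr__", [](const Pet &p) { return "<Pet '" + p.name + "'>"; });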
/// Binds an existing constructor taking arguments Args...
template <typename... Args>
detail::initimpl::constructor<Args...> init() {
return {};
}
/// Like `init<Args...>()`, but the instance is always constructed through the alias class (even
/// when not inheriting on the Python side).
template <typename... Args>
detail::initimpl::alias_constructor<Args...> init_alias() {
return {};
}
/// Binds a factory function as a constructor
template <typename Func, typename Ret = detail::initimpl::factory<Func>>
Ret init(Func &&f) {
return {std::forward<Func>(f)};
}
/// Dual-argument factory function: the first function is called when no alias is needed, the
/// second when an alias is needed (i.e. due to Python-side inheritance). Arguments must be
/// identical.
template <typename CFunc, typename AFunc, typename Ret = detail::initimpl::factory<CFunc, AFunc>>
Ret init(CFunc &&c, AFunc &&a) {
return {std::forward<CFunc>(c), std::forward<AFunc>(a)};
}
/// Binds pickling functions `__getstate__` and `__setstate__` and ensures that the type
/// returned by `__getstate__` is the same as the argument accepted by `__setstate__`.
template <typename GetState, typename SetState>
detail::initimpl::pickle_factory<GetState, SetState> pickle(GetState &&g, SetState &&s) {
return {std::forward<GetState>(g), std::forward<SetState>(s)};
}
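// Usage sketch (hypothetical `Config` type with an int `level` member)
// combining the factory and pickle helpers above:
//
//     py::class_<Config>(m, "Config")
//         .def(py::init([](int level) { return Config(level); }))
//         .def(py::pickle(
//             [](const Config &c) { return py::make_tuple(c.level); },  // __getstate__
//             [](py::tuple t) { return Config(t[0].cast<int>()); }));   // __setstate__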
PYBIND11_NAMESPACE_BEGIN(detail)
inline str enum_name(handle arg) {
dict entries = arg.get_type().attr("__entries");
for (auto kv : entries) {
if (handle(kv.second[int_(0)]).equal(arg)) {
return pybind11::str(kv.first);
}
}
return "???";
}
struct enum_base {
enum_base(const handle &base, const handle &parent) : m_base(base), m_parent(parent) {}
PYBIND11_NOINLINE void init(bool is_arithmetic, bool is_convertible) {
m_base.attr("__entries") = dict();
auto property = handle((PyObject *) &PyProperty_Type);
auto static_property = handle((PyObject *) get_internals().static_property_type);
m_base.attr("__repr__") = cpp_function(
[](const object &arg) -> str {
handle type = type::handle_of(arg);
object type_name = type.attr("__name__");
return pybind11::str("<{}.{}: {}>")
.format(std::move(type_name), enum_name(arg), int_(arg));
},
name("__repr__"),
is_method(m_base));
m_base.attr("name") = property(cpp_function(&enum_name, name("name"), is_method(m_base)));
m_base.attr("__str__") = cpp_function(
[](handle arg) -> str {
object type_name = type::handle_of(arg).attr("__name__");
return pybind11::str("{}.{}").format(std::move(type_name), enum_name(arg));
},
name("__str__"),
is_method(m_base));
if (options::show_enum_members_docstring()) {
m_base.attr("__doc__") = static_property(
cpp_function(
[](handle arg) -> std::string {
std::string docstring;
dict entries = arg.attr("__entries");
if (((PyTypeObject *) arg.ptr())->tp_doc) {
docstring += std::string(
reinterpret_cast<PyTypeObject *>(arg.ptr())->tp_doc);
docstring += "\n\n";
}
docstring += "Members:";
for (auto kv : entries) {
auto key = std::string(pybind11::str(kv.first));
auto comment = kv.second[int_(1)];
docstring += "\n\n ";
docstring += key;
if (!comment.is_none()) {
docstring += " : ";
docstring += pybind11::str(comment).cast<std::string>();
}
}
return docstring;
},
name("__doc__")),
none(),
none(),
"");
}
m_base.attr("__members__") = static_property(cpp_function(
[](handle arg) -> dict {
dict entries = arg.attr("__entries"),
m;
for (auto kv : entries) {
m[kv.first] = kv.second[int_(0)];
}
return m;
},
name("__members__")),
none(),
none(),
"");
#define PYBIND11_ENUM_OP_STRICT(op, expr, strict_behavior) \
m_base.attr(op) = cpp_function( \
[](const object &a, const object &b) { \
if (!type::handle_of(a).is(type::handle_of(b))) \
strict_behavior; /* NOLINT(bugprone-macro-parentheses) */ \
return expr; \
}, \
name(op), \
is_method(m_base), \
arg("other"))
#define PYBIND11_ENUM_OP_CONV(op, expr) \
m_base.attr(op) = cpp_function( \
[](const object &a_, const object &b_) { \
int_ a(a_), b(b_); \
return expr; \
}, \
name(op), \
is_method(m_base), \
arg("other"))
#define PYBIND11_ENUM_OP_CONV_LHS(op, expr) \
m_base.attr(op) = cpp_function( \
[](const object &a_, const object &b) { \
int_ a(a_); \
return expr; \
}, \
name(op), \
is_method(m_base), \
arg("other"))
if (is_convertible) {
PYBIND11_ENUM_OP_CONV_LHS("__eq__", !b.is_none() && a.equal(b));
PYBIND11_ENUM_OP_CONV_LHS("__ne__", b.is_none() || !a.equal(b));
if (is_arithmetic) {
PYBIND11_ENUM_OP_CONV("__lt__", a < b);
PYBIND11_ENUM_OP_CONV("__gt__", a > b);
PYBIND11_ENUM_OP_CONV("__le__", a <= b);
PYBIND11_ENUM_OP_CONV("__ge__", a >= b);
PYBIND11_ENUM_OP_CONV("__and__", a & b);
PYBIND11_ENUM_OP_CONV("__rand__", a & b);
PYBIND11_ENUM_OP_CONV("__or__", a | b);
PYBIND11_ENUM_OP_CONV("__ror__", a | b);
PYBIND11_ENUM_OP_CONV("__xor__", a ^ b);
PYBIND11_ENUM_OP_CONV("__rxor__", a ^ b);
m_base.attr("__invert__")
= cpp_function([](const object &arg) { return ~(int_(arg)); },
name("__invert__"),
is_method(m_base));
}
} else {
PYBIND11_ENUM_OP_STRICT("__eq__", int_(a).equal(int_(b)), return false);
PYBIND11_ENUM_OP_STRICT("__ne__", !int_(a).equal(int_(b)), return true);
if (is_arithmetic) {
#define PYBIND11_THROW throw type_error("Expected an enumeration of matching type!");
PYBIND11_ENUM_OP_STRICT("__lt__", int_(a) < int_(b), PYBIND11_THROW);
PYBIND11_ENUM_OP_STRICT("__gt__", int_(a) > int_(b), PYBIND11_THROW);
PYBIND11_ENUM_OP_STRICT("__le__", int_(a) <= int_(b), PYBIND11_THROW);
PYBIND11_ENUM_OP_STRICT("__ge__", int_(a) >= int_(b), PYBIND11_THROW);
#undef PYBIND11_THROW
}
}
#undef PYBIND11_ENUM_OP_CONV_LHS
#undef PYBIND11_ENUM_OP_CONV
#undef PYBIND11_ENUM_OP_STRICT
m_base.attr("__getstate__") = cpp_function(
[](const object &arg) { return int_(arg); }, name("__getstate__"), is_method(m_base));
m_base.attr("__hash__") = cpp_function(
[](const object &arg) { return int_(arg); }, name("__hash__"), is_method(m_base));
}
PYBIND11_NOINLINE void value(char const *name_, object value, const char *doc = nullptr) {
dict entries = m_base.attr("__entries");
str name(name_);
if (entries.contains(name)) {
std::string type_name = (std::string) str(m_base.attr("__name__"));
throw value_error(std::move(type_name) + ": element \"" + std::string(name_)
+ "\" already exists!");
}
entries[name] = pybind11::make_tuple(value, doc);
m_base.attr(std::move(name)) = std::move(value);
}
PYBIND11_NOINLINE void export_values() {
dict entries = m_base.attr("__entries");
for (auto kv : entries) {
m_parent.attr(kv.first) = kv.second[int_(0)];
}
}
handle m_base;
handle m_parent;
};
template <bool is_signed, size_t length>
struct equivalent_integer {};
template <>
struct equivalent_integer<true, 1> {
using type = int8_t;
};
template <>
struct equivalent_integer<false, 1> {
using type = uint8_t;
};
template <>
struct equivalent_integer<true, 2> {
using type = int16_t;
};
template <>
struct equivalent_integer<false, 2> {
using type = uint16_t;
};
template <>
struct equivalent_integer<true, 4> {
using type = int32_t;
};
template <>
struct equivalent_integer<false, 4> {
using type = uint32_t;
};
template <>
struct equivalent_integer<true, 8> {
using type = int64_t;
};
template <>
struct equivalent_integer<false, 8> {
using type = uint64_t;
};
template <typename IntLike>
using equivalent_integer_t =
typename equivalent_integer<std::is_signed<IntLike>::value, sizeof(IntLike)>::type;
PYBIND11_NAMESPACE_END(detail)
/// Binds C++ enumerations and enumeration classes to Python
template <typename Type>
class enum_ : public class_<Type> {
public:
using Base = class_<Type>;
using Base::attr;
using Base::def;
using Base::def_property_readonly;
using Base::def_property_readonly_static;
using Underlying = typename std::underlying_type<Type>::type;
// Scalar is the integer representation of underlying type
using Scalar = detail::conditional_t<detail::any_of<detail::is_std_char_type<Underlying>,
std::is_same<Underlying, bool>>::value,
detail::equivalent_integer_t<Underlying>,
Underlying>;
template <typename... Extra>
enum_(const handle &scope, const char *name, const Extra &...extra)
: class_<Type>(scope, name, extra...), m_base(*this, scope) {
constexpr bool is_arithmetic = detail::any_of<std::is_same<arithmetic, Extra>...>::value;
constexpr bool is_convertible = std::is_convertible<Type, Underlying>::value;
m_base.init(is_arithmetic, is_convertible);
def(init([](Scalar i) { return static_cast<Type>(i); }), arg("value"));
def_property_readonly("value", [](Type value) { return (Scalar) value; });
def("__int__", [](Type value) { return (Scalar) value; });
def("__index__", [](Type value) { return (Scalar) value; });
attr("__setstate__") = cpp_function(
[](detail::value_and_holder &v_h, Scalar arg) {
detail::initimpl::setstate<Base>(
v_h, static_cast<Type>(arg), Py_TYPE(v_h.inst) != v_h.type->type);
},
detail::is_new_style_constructor(),
pybind11::name("__setstate__"),
is_method(*this),
arg("state"));
}
/// Export enumeration entries into the parent scope
enum_ &export_values() {
m_base.export_values();
return *this;
}
/// Add an enumeration entry
enum_ &value(char const *name, Type value, const char *doc = nullptr) {
m_base.value(name, pybind11::cast(value, return_value_policy::copy), doc);
return *this;
}
private:
detail::enum_base m_base;
};
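// Usage sketch (hypothetical `Color` enum):
//
//     enum class Color { Red, Green };
//     py::enum_<Color>(m, "Color", py::arithmetic())
//         .value("Red", Color::Red)
//         .value("Green", Color::Green)
//         .export_values();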
PYBIND11_NAMESPACE_BEGIN(detail)
PYBIND11_NOINLINE void keep_alive_impl(handle nurse, handle patient) {
if (!nurse || !patient) {
pybind11_fail("Could not activate keep_alive!");
}
if (patient.is_none() || nurse.is_none()) {
return; /* Nothing to keep alive or nothing to be kept alive by */
}
auto tinfo = all_type_info(Py_TYPE(nurse.ptr()));
if (!tinfo.empty()) {
/* It's a pybind-registered type, so we can store the patient in the
* internal list. */
add_patient(nurse.ptr(), patient.ptr());
} else {
/* Fall back to clever approach based on weak references taken from
* Boost.Python. This is not used for pybind-registered types because
* the objects can be destroyed out-of-order in a GC pass. */
cpp_function disable_lifesupport([patient](handle weakref) {
patient.dec_ref();
weakref.dec_ref();
});
weakref wr(nurse, disable_lifesupport);
patient.inc_ref(); /* reference patient and leak the weak reference */
(void) wr.release();
}
}
PYBIND11_NOINLINE void
keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret) {
auto get_arg = [&](size_t n) {
if (n == 0) {
return ret;
}
if (n == 1 && call.init_self) {
return call.init_self;
}
if (n <= call.args.size()) {
return call.args[n - 1];
}
return handle();
};
keep_alive_impl(get_arg(Nurse), get_arg(Patient));
}
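// Usage sketch: the overloads above implement the `py::keep_alive<Nurse, Patient>`
// call policy, where index 0 refers to the return value and index 1 to `self`.
// For a hypothetical `Container` type:
//
//     py::class_<Container>(m, "Container")
//         .def("append", &Container::append,
//              py::keep_alive<1, 2>());  // keep argument (2) alive as long as self (1)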
inline std::pair<decltype(internals::registered_types_py)::iterator, bool>
all_type_info_get_cache(PyTypeObject *type) {
auto res = with_internals([type](internals &internals) {
return internals
.registered_types_py
#ifdef __cpp_lib_unordered_map_try_emplace
.try_emplace(type);
#else
.emplace(type, std::vector<detail::type_info *>());
#endif
});
if (res.second) {
// New cache entry created; set up a weak reference to automatically remove it if the type
// gets destroyed:
weakref((PyObject *) type, cpp_function([type](handle wr) {
with_internals([type](internals &internals) {
internals.registered_types_py.erase(type);
// TODO consolidate the erasure code in pybind11_meta_dealloc() in class.h
auto &cache = internals.inactive_override_cache;
for (auto it = cache.begin(), last = cache.end(); it != last;) {
if (it->first == reinterpret_cast<PyObject *>(type)) {
it = cache.erase(it);
} else {
++it;
}
}
});
wr.dec_ref();
}))
.release();
}
return res;
}
/* There are a large number of apparently unused template arguments because
* each combination requires a separate py::class_ registration.
*/
template <typename Access,
return_value_policy Policy,
typename Iterator,
typename Sentinel,
typename ValueType,
typename... Extra>
struct iterator_state {
Iterator it;
Sentinel end;
bool first_or_done;
};
// Note: these helpers take the iterator by non-const reference because some
// iterators in the wild can't be dereferenced when const. The & after Iterator
// is required for MSVC < 16.9. SFINAE cannot be reused for result_type due to
// bugs in ICC, NVCC, and PGI compilers. See PR #3293.
template <typename Iterator, typename SFINAE = decltype(*std::declval<Iterator &>())>
struct iterator_access {
using result_type = decltype(*std::declval<Iterator &>());
// NOLINTNEXTLINE(readability-const-return-type) // PR #3263
result_type operator()(Iterator &it) const { return *it; }
};
template <typename Iterator, typename SFINAE = decltype((*std::declval<Iterator &>()).first)>
class iterator_key_access {
private:
using pair_type = decltype(*std::declval<Iterator &>());
public:
/* If either the pair itself or the element of the pair is a reference, we
* want to return a reference, otherwise a value. When the decltype
* expression is parenthesized it is based on the value category of the
* expression; otherwise it is the declared type of the pair member.
* The use of declval<pair_type> in the second branch rather than directly
* using *std::declval<Iterator &>() is a workaround for nvcc
* (it's not used in the first branch because going via decltype and back
* through declval does not perfectly preserve references).
*/
using result_type
= conditional_t<std::is_reference<decltype(*std::declval<Iterator &>())>::value,
decltype(((*std::declval<Iterator &>()).first)),
decltype(std::declval<pair_type>().first)>;
result_type operator()(Iterator &it) const { return (*it).first; }
};
template <typename Iterator, typename SFINAE = decltype((*std::declval<Iterator &>()).second)>
class iterator_value_access {
private:
using pair_type = decltype(*std::declval<Iterator &>());
public:
using result_type
= conditional_t<std::is_reference<decltype(*std::declval<Iterator &>())>::value,
decltype(((*std::declval<Iterator &>()).second)),
decltype(std::declval<pair_type>().second)>;
result_type operator()(Iterator &it) const { return (*it).second; }
};
template <typename Access,
return_value_policy Policy,
typename Iterator,
typename Sentinel,
typename ValueType,
typename... Extra>
iterator make_iterator_impl(Iterator first, Sentinel last, Extra &&...extra) {
using state = detail::iterator_state<Access, Policy, Iterator, Sentinel, ValueType, Extra...>;
// TODO: state captures only the types of Extra, not the values
if (!detail::get_type_info(typeid(state), false)) {
class_<state>(handle(), "iterator", pybind11::module_local())
.def("__iter__", [](state &s) -> state & { return s; })
.def(
"__next__",
[](state &s) -> ValueType {
if (!s.first_or_done) {
++s.it;
} else {
s.first_or_done = false;
}
if (s.it == s.end) {
s.first_or_done = true;
throw stop_iteration();
}
return Access()(s.it);
// NOLINTNEXTLINE(readability-const-return-type) // PR #3263
},
std::forward<Extra>(extra)...,
Policy);
}
return cast(state{std::forward<Iterator>(first), std::forward<Sentinel>(last), true});
}
PYBIND11_NAMESPACE_END(detail)
/// Makes a Python iterator from a first and past-the-end C++ InputIterator.
template <return_value_policy Policy = return_value_policy::reference_internal,
typename Iterator,
typename Sentinel,
typename ValueType = typename detail::iterator_access<Iterator>::result_type,
typename... Extra>
typing::Iterator<ValueType> make_iterator(Iterator first, Sentinel last, Extra &&...extra) {
return detail::make_iterator_impl<detail::iterator_access<Iterator>,
Policy,
Iterator,
Sentinel,
ValueType,
Extra...>(std::forward<Iterator>(first),
std::forward<Sentinel>(last),
std::forward<Extra>(extra)...);
}
/// Makes a Python iterator over the keys (`.first`) of an iterator over pairs from a
/// first and past-the-end InputIterator.
template <return_value_policy Policy = return_value_policy::reference_internal,
typename Iterator,
typename Sentinel,
typename KeyType = typename detail::iterator_key_access<Iterator>::result_type,
typename... Extra>
typing::Iterator<KeyType> make_key_iterator(Iterator first, Sentinel last, Extra &&...extra) {
return detail::make_iterator_impl<detail::iterator_key_access<Iterator>,
Policy,
Iterator,
Sentinel,
KeyType,
Extra...>(std::forward<Iterator>(first),
std::forward<Sentinel>(last),
std::forward<Extra>(extra)...);
}
/// Makes a Python iterator over the values (`.second`) of an iterator over pairs from a
/// first and past-the-end InputIterator.
template <return_value_policy Policy = return_value_policy::reference_internal,
typename Iterator,
typename Sentinel,
typename ValueType = typename detail::iterator_value_access<Iterator>::result_type,
typename... Extra>
typing::Iterator<ValueType> make_value_iterator(Iterator first, Sentinel last, Extra &&...extra) {
return detail::make_iterator_impl<detail::iterator_value_access<Iterator>,
Policy,
Iterator,
Sentinel,
ValueType,
Extra...>(std::forward<Iterator>(first),
std::forward<Sentinel>(last),
std::forward<Extra>(extra)...);
}
/// Makes an iterator over the values of an STL container or other container supporting
/// `std::begin()`/`std::end()`
template <return_value_policy Policy = return_value_policy::reference_internal,
typename Type,
typename ValueType = typename detail::iterator_access<
decltype(std::begin(std::declval<Type &>()))>::result_type,
typename... Extra>
typing::Iterator<ValueType> make_iterator(Type &value, Extra &&...extra) {
return make_iterator<Policy>(
std::begin(value), std::end(value), std::forward<Extra>(extra)...);
}
/// Makes an iterator over the keys (`.first`) of an STL map-like container supporting
/// `std::begin()`/`std::end()`
template <return_value_policy Policy = return_value_policy::reference_internal,
typename Type,
typename KeyType = typename detail::iterator_key_access<
decltype(std::begin(std::declval<Type &>()))>::result_type,
typename... Extra>
typing::Iterator<KeyType> make_key_iterator(Type &value, Extra &&...extra) {
return make_key_iterator<Policy>(
std::begin(value), std::end(value), std::forward<Extra>(extra)...);
}
/// Makes an iterator over the values (`.second`) of an STL map-like container supporting
/// `std::begin()`/`std::end()`
template <return_value_policy Policy = return_value_policy::reference_internal,
typename Type,
typename ValueType = typename detail::iterator_value_access<
decltype(std::begin(std::declval<Type &>()))>::result_type,
typename... Extra>
typing::Iterator<ValueType> make_value_iterator(Type &value, Extra &&...extra) {
return make_value_iterator<Policy>(
std::begin(value), std::end(value), std::forward<Extra>(extra)...);
}
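// Usage sketch (hypothetical `StringMap` type wrapping a std::map member `map`):
// the helpers above are typically bound as `__iter__`, with keep_alive tying the
// iterator's lifetime to the container:
//
//     py::class_<StringMap>(m, "StringMap")
//         .def("__iter__",
//              [](StringMap &sm) { return py::make_key_iterator(sm.map); },
//              py::keep_alive<0, 1>());  // keep StringMap alive while iterating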
template <typename InputType, typename OutputType>
void implicitly_convertible() {
struct set_flag {
bool &flag;
explicit set_flag(bool &flag_) : flag(flag_) { flag_ = true; }
~set_flag() { flag = false; }
};
auto implicit_caster = [](PyObject *obj, PyTypeObject *type) -> PyObject * {
#ifdef Py_GIL_DISABLED
thread_local bool currently_used = false;
#else
static bool currently_used = false;
#endif
if (currently_used) { // implicit conversions are non-reentrant
return nullptr;
}
set_flag flag_helper(currently_used);
if (!detail::make_caster<InputType>().load(obj, false)) {
return nullptr;
}
tuple args(1);
args[0] = obj;
PyObject *result = PyObject_Call((PyObject *) type, args.ptr(), nullptr);
if (result == nullptr) {
PyErr_Clear();
}
return result;
};
if (auto *tinfo = detail::get_type_info(typeid(OutputType))) {
tinfo->implicit_conversions.emplace_back(std::move(implicit_caster));
} else {
pybind11_fail("implicitly_convertible: Unable to find type " + type_id<OutputType>());
}
}
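// Usage sketch (hypothetical types A and B, both already bound): register the
// implicit conversion so Python callers may pass an `A` where a `B` is expected:
//
//     py::class_<A>(m, "A").def(py::init<int>());
//     py::class_<B>(m, "B").def(py::init<const A &>());
//     py::implicitly_convertible<A, B>();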
inline void register_exception_translator(ExceptionTranslator &&translator) {
detail::with_internals([&](detail::internals &internals) {
internals.registered_exception_translators.push_front(
std::forward<ExceptionTranslator>(translator));
});
}
/**
* Add a new module-local exception translator. Locally registered functions
* will be tried before any globally registered exception translators, which
* will only be invoked if the module-local handlers do not deal with
* the exception.
*/
inline void register_local_exception_translator(ExceptionTranslator &&translator) {
detail::with_internals([&](detail::internals &internals) {
(void) internals;
detail::get_local_internals().registered_exception_translators.push_front(
std::forward<ExceptionTranslator>(translator));
});
}
/**
* Wrapper to generate a new Python exception type.
*
* This should only be used with py::set_error() for now.
* It is not (yet) possible to use as a py::base.
* Template type argument is reserved for future use.
*/
template <typename type>
class exception : public object {
public:
exception() = default;
exception(handle scope, const char *name, handle base = PyExc_Exception) {
std::string full_name
= scope.attr("__name__").cast<std::string>() + std::string(".") + name;
m_ptr = PyErr_NewException(const_cast<char *>(full_name.c_str()), base.ptr(), nullptr);
if (hasattr(scope, "__dict__") && scope.attr("__dict__").contains(name)) {
pybind11_fail("Error during initialization: multiple incompatible "
"definitions with name \""
+ std::string(name) + "\"");
}
scope.attr(name) = *this;
}
// Sets the current python exception to this exception object with the given message
PYBIND11_DEPRECATED("Please use py::set_error() instead "
"(https://github.com/pybind/pybind11/pull/4772)")
void operator()(const char *message) const { set_error(*this, message); }
};
PYBIND11_NAMESPACE_BEGIN(detail)
template <>
struct handle_type_name<exception<void>> {
static constexpr auto name = const_name("Exception");
};
// Helper function for register_exception and register_local_exception
template <typename CppException>
exception<CppException> &
register_exception_impl(handle scope, const char *name, handle base, bool isLocal) {
PYBIND11_CONSTINIT static gil_safe_call_once_and_store<exception<CppException>> exc_storage;
exc_storage.call_once_and_store_result(
[&]() { return exception<CppException>(scope, name, base); });
auto register_func
        = isLocal ? &register_local_exception_translator : &register_exception_translator;
register_func([](std::exception_ptr p) {
if (!p) {
return;
}
try {
std::rethrow_exception(p);
} catch (const CppException &e) {
set_error(exc_storage.get_stored(), e.what());
}
});
return exc_storage.get_stored();
}
PYBIND11_NAMESPACE_END(detail)
/**
 * Registers a Python exception in `scope` with the given `name` and installs a translator to
* translate the C++ exception to the created Python exception using the what() method.
* This is intended for simple exception translations; for more complex translation, register the
* exception object and translator directly.
*/
template <typename CppException>
exception<CppException> &
register_exception(handle scope, const char *name, handle base = PyExc_Exception) {
return detail::register_exception_impl<CppException>(scope, name, base, false /* isLocal */);
}
/**
 * Registers a Python exception in `scope` with the given `name` and installs a translator to
* translate the C++ exception to the created Python exception using the what() method.
* This translator will only be used for exceptions that are thrown in this module and will be
* tried before global exception translators, including those registered with register_exception.
* This is intended for simple exception translations; for more complex translation, register the
* exception object and translator directly.
*/
template <typename CppException>
exception<CppException> &
register_local_exception(handle scope, const char *name, handle base = PyExc_Exception) {
return detail::register_exception_impl<CppException>(scope, name, base, true /* isLocal */);
}
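// Usage sketch (hypothetical exception type):
//
//     struct MyError : std::runtime_error { using std::runtime_error::runtime_error; };
//     py::register_exception<MyError>(m, "MyError", PyExc_RuntimeError);
//     // C++ code throwing MyError("boom") now raises <module>.MyError in Python.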
PYBIND11_NAMESPACE_BEGIN(detail)
PYBIND11_NOINLINE void print(const tuple &args, const dict &kwargs) {
auto strings = tuple(args.size());
for (size_t i = 0; i < args.size(); ++i) {
strings[i] = str(args[i]);
}
auto sep = kwargs.contains("sep") ? kwargs["sep"] : str(" ");
auto line = sep.attr("join")(std::move(strings));
object file;
if (kwargs.contains("file")) {
file = kwargs["file"].cast<object>();
} else {
try {
file = module_::import("sys").attr("stdout");
} catch (const error_already_set &) {
/* If print() is called from code that is executed as
part of garbage collection during interpreter shutdown,
importing 'sys' can fail. Give up rather than crashing the
interpreter in this case. */
return;
}
}
auto write = file.attr("write");
write(std::move(line));
write(kwargs.contains("end") ? kwargs["end"] : str("\n"));
if (kwargs.contains("flush") && kwargs["flush"].cast<bool>()) {
file.attr("flush")();
}
}
PYBIND11_NAMESPACE_END(detail)
template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
void print(Args &&...args) {
auto c = detail::collect_arguments<policy>(std::forward<Args>(args)...);
detail::print(c.args(), c.kwargs());
}
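// Usage sketch: py::print mirrors Python's built-in print(), including the
// keyword-style options handled in detail::print above:
//
//     py::print("value =", 42);
//     py::print("a", "b", "c", py::arg("sep") = "-", py::arg("end") = "!\n");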
inline void
error_already_set::m_fetched_error_deleter(detail::error_fetch_and_normalize *raw_ptr) {
gil_scoped_acquire gil;
error_scope scope;
delete raw_ptr;
}
inline const char *error_already_set::what() const noexcept {
gil_scoped_acquire gil;
error_scope scope;
return m_fetched_error->error_string().c_str();
}
PYBIND11_NAMESPACE_BEGIN(detail)
inline function
get_type_override(const void *this_ptr, const type_info *this_type, const char *name) {
handle self = get_object_handle(this_ptr, this_type);
if (!self) {
return function();
}
handle type = type::handle_of(self);
auto key = std::make_pair(type.ptr(), name);
/* Cache functions that aren't overridden in Python to avoid
many costly Python dictionary lookups below */
bool not_overridden = with_internals([&key](internals &internals) {
auto &cache = internals.inactive_override_cache;
return cache.find(key) != cache.end();
});
if (not_overridden) {
return function();
}
function override = getattr(self, name, function());
if (override.is_cpp_function()) {
with_internals([&](internals &internals) {
internals.inactive_override_cache.insert(std::move(key));
});
return function();
}
/* Don't call dispatch code if invoked from overridden function.
Unfortunately this doesn't work on PyPy. */
#if !defined(PYPY_VERSION)
# if PY_VERSION_HEX >= 0x03090000
PyFrameObject *frame = PyThreadState_GetFrame(PyThreadState_Get());
if (frame != nullptr) {
PyCodeObject *f_code = PyFrame_GetCode(frame);
// f_code is guaranteed to not be NULL
if ((std::string) str(f_code->co_name) == name && f_code->co_argcount > 0) {
# if PY_VERSION_HEX >= 0x030d0000
PyObject *locals = PyEval_GetFrameLocals();
# else
PyObject *locals = PyEval_GetLocals();
Py_XINCREF(locals);
# endif
if (locals != nullptr) {
# if PY_VERSION_HEX >= 0x030b0000
PyObject *co_varnames = PyCode_GetVarnames(f_code);
# else
PyObject *co_varnames = PyObject_GetAttrString((PyObject *) f_code, "co_varnames");
# endif
PyObject *self_arg = PyTuple_GET_ITEM(co_varnames, 0);
Py_DECREF(co_varnames);
PyObject *self_caller = dict_getitem(locals, self_arg);
Py_DECREF(locals);
if (self_caller == self.ptr()) {
Py_DECREF(f_code);
Py_DECREF(frame);
return function();
}
}
}
Py_DECREF(f_code);
Py_DECREF(frame);
}
# else
PyFrameObject *frame = PyThreadState_Get()->frame;
if (frame != nullptr && (std::string) str(frame->f_code->co_name) == name
&& frame->f_code->co_argcount > 0) {
PyFrame_FastToLocals(frame);
PyObject *self_caller
= dict_getitem(frame->f_locals, PyTuple_GET_ITEM(frame->f_code->co_varnames, 0));
if (self_caller == self.ptr()) {
return function();
}
}
# endif
#else
/* PyPy currently doesn't provide a detailed cpyext emulation of
frame objects, so we have to emulate this using Python. This
       is going to be slow... */
dict d;
d["self"] = self;
d["name"] = pybind11::str(name);
PyObject *result
= PyRun_String("import inspect\n"
"frame = inspect.currentframe()\n"
"if frame is not None:\n"
" frame = frame.f_back\n"
" if frame is not None and str(frame.f_code.co_name) == name and "
"frame.f_code.co_argcount > 0:\n"
" self_caller = frame.f_locals[frame.f_code.co_varnames[0]]\n"
" if self_caller == self:\n"
" self = None\n",
Py_file_input,
d.ptr(),
d.ptr());
if (result == nullptr)
throw error_already_set();
Py_DECREF(result);
if (d["self"].is_none())
return function();
#endif
return override;
}
PYBIND11_NAMESPACE_END(detail)
/** \rst
    Try to retrieve a Python method with the provided name from the instance pointed to by
    this_ptr.
:this_ptr: The pointer to the object the overridden method should be retrieved for. This should
be the first non-trampoline class encountered in the inheritance chain.
:name: The name of the overridden Python method to retrieve.
:return: The Python method by this name from the object or an empty function wrapper.
\endrst */
template <class T>
function get_override(const T *this_ptr, const char *name) {
auto *tinfo = detail::get_type_info(typeid(T));
return tinfo ? detail::get_type_override(this_ptr, tinfo, name) : function();
}
#define PYBIND11_OVERRIDE_IMPL(ret_type, cname, name, ...) \
do { \
pybind11::gil_scoped_acquire gil; \
pybind11::function override \
= pybind11::get_override(static_cast<const cname *>(this), name); \
if (override) { \
auto o = override(__VA_ARGS__); \
PYBIND11_WARNING_PUSH \
PYBIND11_WARNING_DISABLE_MSVC(4127) \
if (pybind11::detail::cast_is_temporary_value_reference<ret_type>::value \
&& !pybind11::detail::is_same_ignoring_cvref<ret_type, PyObject *>::value) { \
static pybind11::detail::override_caster_t<ret_type> caster; \
return pybind11::detail::cast_ref<ret_type>(std::move(o), caster); \
} \
PYBIND11_WARNING_POP \
return pybind11::detail::cast_safe<ret_type>(std::move(o)); \
} \
} while (false)
/** \rst
Macro to populate the virtual method in the trampoline class. This macro tries to look up a
method named 'fn' from the Python side, deals with the :ref:`gil` and necessary argument
conversions to call this method and return the appropriate type.
See :ref:`overriding_virtuals` for more information. This macro should be used when the method
    name in C++ is not the same as the method name in Python, for example `__str__`.
.. code-block:: cpp
std::string toString() override {
PYBIND11_OVERRIDE_NAME(
std::string, // Return type (ret_type)
Animal, // Parent class (cname)
"__str__", // Name of method in Python (name)
toString, // Name of function in C++ (fn)
);
}
\endrst */
#define PYBIND11_OVERRIDE_NAME(ret_type, cname, name, fn, ...) \
do { \
PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__); \
return cname::fn(__VA_ARGS__); \
} while (false)
/** \rst
    Macro for pure virtual functions; it is identical to
    :c:macro:`PYBIND11_OVERRIDE_NAME`, except that it throws if no override can be found.
\endrst */
#define PYBIND11_OVERRIDE_PURE_NAME(ret_type, cname, name, fn, ...) \
do { \
PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__); \
pybind11::pybind11_fail( \
"Tried to call pure virtual function \"" PYBIND11_STRINGIFY(cname) "::" name "\""); \
} while (false)
/** \rst
Macro to populate the virtual method in the trampoline class. This macro tries to look up the
method from the Python side, deals with the :ref:`gil` and necessary argument conversions to
    call this method and return the appropriate type. This macro should be used if the method
    names in C++ and Python are identical.
See :ref:`overriding_virtuals` for more information.
.. code-block:: cpp
class PyAnimal : public Animal {
public:
// Inherit the constructors
using Animal::Animal;
// Trampoline (need one for each virtual function)
std::string go(int n_times) override {
PYBIND11_OVERRIDE_PURE(
std::string, // Return type (ret_type)
Animal, // Parent class (cname)
go, // Name of function in C++ (must match Python name) (fn)
n_times // Argument(s) (...)
);
}
};
\endrst */
#define PYBIND11_OVERRIDE(ret_type, cname, fn, ...) \
PYBIND11_OVERRIDE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
/** \rst
    Macro for pure virtual functions; it is identical to :c:macro:`PYBIND11_OVERRIDE`,
    except that it throws if no override can be found.
\endrst */
#define PYBIND11_OVERRIDE_PURE(ret_type, cname, fn, ...) \
PYBIND11_OVERRIDE_PURE_NAME( \
PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
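// Usage sketch: a trampoline class written with the macros above (such as the
// hypothetical PyAnimal from the docstring) is registered by naming it as the
// second template argument of class_:
//
//     py::class_<Animal, PyAnimal>(m, "Animal")
//         .def(py::init<>())
//         .def("go", &Animal::go);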
// Deprecated versions
PYBIND11_DEPRECATED("get_type_overload has been deprecated")
inline function
get_type_overload(const void *this_ptr, const detail::type_info *this_type, const char *name) {
return detail::get_type_override(this_ptr, this_type, name);
}
template <class T>
inline function get_overload(const T *this_ptr, const char *name) {
return get_override(this_ptr, name);
}
#define PYBIND11_OVERLOAD_INT(ret_type, cname, name, ...) \
PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__)
#define PYBIND11_OVERLOAD_NAME(ret_type, cname, name, fn, ...) \
PYBIND11_OVERRIDE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, fn, __VA_ARGS__)
#define PYBIND11_OVERLOAD_PURE_NAME(ret_type, cname, name, fn, ...) \
PYBIND11_OVERRIDE_PURE_NAME( \
PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, fn, __VA_ARGS__);
#define PYBIND11_OVERLOAD(ret_type, cname, fn, ...) \
PYBIND11_OVERRIDE(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), fn, __VA_ARGS__)
#define PYBIND11_OVERLOAD_PURE(ret_type, cname, fn, ...) \
PYBIND11_OVERRIDE_PURE(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), fn, __VA_ARGS__);
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
|
========================================================================================================================
SOURCE CODE FILE: pytypes.h
LINES: 14
SIZE: 100.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\pytypes.h
ENCODING: utf-8
```h
/*
pybind11/pytypes.h: Convenience wrapper classes for basic Python types
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include "buffer_info.h"
#include <assert.h>
#include <cstddef>
#include <exception>
#include <frameobject.h>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <typeinfo>
#include <utility>
#if defined(PYBIND11_HAS_OPTIONAL)
# include <optional>
#endif
#ifdef PYBIND11_HAS_STRING_VIEW
# include <string_view>
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
/* A few forward declarations */
class handle;
class object;
class str;
class iterator;
class type;
struct arg;
struct arg_v;
PYBIND11_NAMESPACE_BEGIN(detail)
class args_proxy;
bool isinstance_generic(handle obj, const std::type_info &tp);
// Accessor forward declarations
template <typename Policy>
class accessor;
namespace accessor_policies {
struct obj_attr;
struct str_attr;
struct generic_item;
struct sequence_item;
struct list_item;
struct tuple_item;
} // namespace accessor_policies
// PLEASE KEEP handle_type_name SPECIALIZATIONS IN SYNC.
using obj_attr_accessor = accessor<accessor_policies::obj_attr>;
using str_attr_accessor = accessor<accessor_policies::str_attr>;
using item_accessor = accessor<accessor_policies::generic_item>;
using sequence_accessor = accessor<accessor_policies::sequence_item>;
using list_accessor = accessor<accessor_policies::list_item>;
using tuple_accessor = accessor<accessor_policies::tuple_item>;
/// Tag and check to identify a class which implements the Python object API
class pyobject_tag {};
template <typename T>
using is_pyobject = std::is_base_of<pyobject_tag, remove_reference_t<T>>;
/** \rst
A mixin class which adds common functions to `handle`, `object` and various accessors.
The only requirement for `Derived` is to implement ``PyObject *Derived::ptr() const``.
\endrst */
template <typename Derived>
class object_api : public pyobject_tag {
const Derived &derived() const { return static_cast<const Derived &>(*this); }
public:
/** \rst
Return an iterator equivalent to calling ``iter()`` in Python. The object
must be a collection which supports the iteration protocol.
\endrst */
iterator begin() const;
/// Return a sentinel which ends iteration.
iterator end() const;
/** \rst
Return an internal functor to invoke the object's sequence protocol. Casting
the returned ``detail::item_accessor`` instance to a `handle` or `object`
subclass causes a corresponding call to ``__getitem__``. Assigning a `handle`
or `object` subclass causes a call to ``__setitem__``.
\endrst */
item_accessor operator[](handle key) const;
/// See above (the only difference is that the key's reference is stolen)
item_accessor operator[](object &&key) const;
/// See above (the only difference is that the key is provided as a string literal)
item_accessor operator[](const char *key) const;
/** \rst
Return an internal functor to access the object's attributes. Casting the
returned ``detail::obj_attr_accessor`` instance to a `handle` or `object`
subclass causes a corresponding call to ``getattr``. Assigning a `handle`
or `object` subclass causes a call to ``setattr``.
\endrst */
obj_attr_accessor attr(handle key) const;
/// See above (the only difference is that the key's reference is stolen)
obj_attr_accessor attr(object &&key) const;
/// See above (the only difference is that the key is provided as a string literal)
str_attr_accessor attr(const char *key) const;
/** \rst
Matches * unpacking in Python, e.g. to unpack arguments out of a ``tuple``
or ``list`` for a function call. Applying another * to the result yields
** unpacking, e.g. to unpack a dict as function keyword arguments.
See :ref:`calling_python_functions`.
\endrst */
args_proxy operator*() const;
/// Check if the given item is contained within this object, i.e. ``item in obj``.
template <typename T>
bool contains(T &&item) const;
/** \rst
Assuming the Python object is a function or implements the ``__call__``
protocol, ``operator()`` invokes the underlying function, passing an
arbitrary set of parameters. The result is returned as an `object` and
may need to be converted into the desired C++ or Python type using
`handle::cast()`. When some of the arguments cannot be converted to
Python objects, the function will throw a `cast_error` exception. When
the Python function call fails, an `error_already_set` exception is
thrown.
\endrst */
template <return_value_policy policy = return_value_policy::automatic_reference,
typename... Args>
object operator()(Args &&...args) const;
template <return_value_policy policy = return_value_policy::automatic_reference,
typename... Args>
PYBIND11_DEPRECATED("call(...) was deprecated in favor of operator()(...)")
object call(Args &&...args) const;
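// Illustrative sketch (hypothetical names; `py` is the usual alias): calling
// a Python callable through `operator()` and converting the result back.
//
//     py::object fn = py::module_::import("math").attr("pow");
//     double r = fn(2, 10).cast<double>(); // r == 1024.0
//
// A failed argument conversion throws `cast_error`; a failing Python call
// throws `error_already_set`.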
/// Equivalent to ``obj is other`` in Python.
bool is(object_api const &other) const { return derived().ptr() == other.derived().ptr(); }
/// Equivalent to ``obj is None`` in Python.
bool is_none() const { return derived().ptr() == Py_None; }
/// Equivalent to obj == other in Python
bool equal(object_api const &other) const { return rich_compare(other, Py_EQ); }
bool not_equal(object_api const &other) const { return rich_compare(other, Py_NE); }
bool operator<(object_api const &other) const { return rich_compare(other, Py_LT); }
bool operator<=(object_api const &other) const { return rich_compare(other, Py_LE); }
bool operator>(object_api const &other) const { return rich_compare(other, Py_GT); }
bool operator>=(object_api const &other) const { return rich_compare(other, Py_GE); }
object operator-() const;
object operator~() const;
object operator+(object_api const &other) const;
object operator+=(object_api const &other);
object operator-(object_api const &other) const;
object operator-=(object_api const &other);
object operator*(object_api const &other) const;
object operator*=(object_api const &other);
object operator/(object_api const &other) const;
object operator/=(object_api const &other);
object operator|(object_api const &other) const;
object operator|=(object_api const &other);
object operator&(object_api const &other) const;
object operator&=(object_api const &other);
object operator^(object_api const &other) const;
object operator^=(object_api const &other);
object operator<<(object_api const &other) const;
object operator<<=(object_api const &other);
object operator>>(object_api const &other) const;
object operator>>=(object_api const &other);
PYBIND11_DEPRECATED("Use py::str(obj) instead")
pybind11::str str() const;
/// Get or set the object's docstring, i.e. ``obj.__doc__``.
str_attr_accessor doc() const;
/// Return the object's current reference count
ssize_t ref_count() const {
#ifdef PYPY_VERSION
// PyPy uses the top few bits for REFCNT_FROM_PYPY & REFCNT_FROM_PYPY_LIGHT.
// Following the behavior of pybind11 2.12.1 and older, those flag bits are
// dropped via the narrowing cast to int below.
return static_cast<ssize_t>(static_cast<int>(Py_REFCNT(derived().ptr())));
#else
return Py_REFCNT(derived().ptr());
#endif
}
// TODO PYBIND11_DEPRECATED(
// "Call py::type::handle_of(h) or py::type::of(h) instead of h.get_type()")
handle get_type() const;
private:
bool rich_compare(object_api const &other, int value) const;
};
template <typename T>
using is_pyobj_ptr_or_nullptr_t = detail::any_of<std::is_same<T, PyObject *>,
std::is_same<T, PyObject *const>,
std::is_same<T, std::nullptr_t>>;
PYBIND11_NAMESPACE_END(detail)
#if !defined(PYBIND11_HANDLE_REF_DEBUG) && !defined(NDEBUG)
# define PYBIND11_HANDLE_REF_DEBUG
#endif
/** \rst
Holds a reference to a Python object (no reference counting)
The `handle` class is a thin wrapper around an arbitrary Python object (i.e. a
``PyObject *`` in Python's C API). It does not perform any automatic reference
counting and merely provides a basic C++ interface to various Python API functions.
.. seealso::
The `object` class inherits from `handle` and adds automatic reference
counting features.
\endrst */
class handle : public detail::object_api<handle> {
public:
/// The default constructor creates a handle with a ``nullptr``-valued pointer
handle() = default;
/// Enable implicit conversion from ``PyObject *`` and ``nullptr``.
/// Not using ``handle(PyObject *ptr)`` to avoid implicit conversion from ``0``.
template <typename T,
detail::enable_if_t<detail::is_pyobj_ptr_or_nullptr_t<T>::value, int> = 0>
// NOLINTNEXTLINE(google-explicit-constructor)
handle(T ptr) : m_ptr(ptr) {}
/// Enable implicit conversion through ``T::operator PyObject *()``.
template <
typename T,
detail::enable_if_t<detail::all_of<detail::none_of<std::is_base_of<handle, T>,
detail::is_pyobj_ptr_or_nullptr_t<T>>,
std::is_convertible<T, PyObject *>>::value,
int>
= 0>
// NOLINTNEXTLINE(google-explicit-constructor)
handle(T &obj) : m_ptr(obj) {}
/// Return the underlying ``PyObject *`` pointer
PyObject *ptr() const { return m_ptr; }
PyObject *&ptr() { return m_ptr; }
/** \rst
Manually increase the reference count of the Python object. Usually, it is
preferable to use the `object` class which derives from `handle` and calls
this function automatically. Returns a reference to itself.
\endrst */
const handle &inc_ref() const & {
#ifdef PYBIND11_HANDLE_REF_DEBUG
inc_ref_counter(1);
#endif
#ifdef PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
if (m_ptr != nullptr && !PyGILState_Check()) {
throw_gilstate_error("pybind11::handle::inc_ref()");
}
#endif
Py_XINCREF(m_ptr);
return *this;
}
/** \rst
Manually decrease the reference count of the Python object. Usually, it is
preferable to use the `object` class which derives from `handle` and calls
this function automatically. Returns a reference to itself.
\endrst */
const handle &dec_ref() const & {
#ifdef PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
if (m_ptr != nullptr && !PyGILState_Check()) {
throw_gilstate_error("pybind11::handle::dec_ref()");
}
#endif
Py_XDECREF(m_ptr);
return *this;
}
/** \rst
Attempt to cast the Python object into the given C++ type. A `cast_error`
will be thrown upon failure.
\endrst */
template <typename T>
T cast() const;
/// Return ``true`` when the `handle` wraps a valid Python object
explicit operator bool() const { return m_ptr != nullptr; }
/** \rst
Deprecated: Check that the underlying pointers are the same.
Equivalent to ``obj1 is obj2`` in Python.
\endrst */
PYBIND11_DEPRECATED("Use obj1.is(obj2) instead")
bool operator==(const handle &h) const { return m_ptr == h.m_ptr; }
PYBIND11_DEPRECATED("Use !obj1.is(obj2) instead")
bool operator!=(const handle &h) const { return m_ptr != h.m_ptr; }
PYBIND11_DEPRECATED("Use handle::operator bool() instead")
bool check() const { return m_ptr != nullptr; }
protected:
PyObject *m_ptr = nullptr;
private:
#ifdef PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
void throw_gilstate_error(const std::string &function_name) const {
fprintf(
stderr,
"%s is being called while the GIL is either not held or invalid. Please see "
"https://pybind11.readthedocs.io/en/stable/advanced/"
"misc.html#common-sources-of-global-interpreter-lock-errors for debugging advice.\n"
"If you are convinced there is no bug in your code, you can #define "
"PYBIND11_NO_ASSERT_GIL_HELD_INCREF_DECREF "
"to disable this check. In that case you have to ensure this #define is consistently "
"used for all translation units linked into a given pybind11 extension, otherwise "
"there will be ODR violations.",
function_name.c_str());
if (Py_TYPE(m_ptr)->tp_name != nullptr) {
fprintf(stderr,
" The failing %s call was triggered on a %s object.",
function_name.c_str(),
Py_TYPE(m_ptr)->tp_name);
}
fprintf(stderr, "\n");
fflush(stderr);
throw std::runtime_error(function_name + " PyGILState_Check() failure.");
}
#endif
#ifdef PYBIND11_HANDLE_REF_DEBUG
static std::size_t inc_ref_counter(std::size_t add) {
thread_local std::size_t counter = 0;
counter += add;
return counter;
}
public:
static std::size_t inc_ref_counter() { return inc_ref_counter(0); }
#endif
};
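// Illustrative sketch: `handle` leaves all reference counting to the caller.
// `raw` is a hypothetical `PyObject *` that owns one reference.
//
//     py::handle h(raw);
//     h.inc_ref(); // the object now holds two references
//     h.dec_ref(); // back to one; destroying `h` itself changes nothing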
inline void set_error(const handle &type, const char *message) {
PyErr_SetString(type.ptr(), message);
}
inline void set_error(const handle &type, const handle &value) {
PyErr_SetObject(type.ptr(), value.ptr());
}
/** \rst
Holds a reference to a Python object (with reference counting)
Like `handle`, the `object` class is a thin wrapper around an arbitrary Python
object (i.e. a ``PyObject *`` in Python's C API). In contrast to `handle`, it
optionally increases the object's reference count upon construction, and it
*always* decreases the reference count when the `object` instance goes out of
scope and is destructed. When using `object` instances consistently, it is much
easier to get reference counting right on the first attempt.
\endrst */
class object : public handle {
public:
object() = default;
PYBIND11_DEPRECATED("Use reinterpret_borrow<object>() or reinterpret_steal<object>()")
object(handle h, bool is_borrowed) : handle(h) {
if (is_borrowed) {
inc_ref();
}
}
/// Copy constructor; always increases the reference count
object(const object &o) : handle(o) { inc_ref(); }
/// Move constructor; steals the object from ``other`` and preserves its reference count
object(object &&other) noexcept : handle(other) { other.m_ptr = nullptr; }
/// Destructor; automatically calls `handle::dec_ref()`
~object() { dec_ref(); }
/** \rst
Resets the internal pointer to ``nullptr`` without decreasing the
object's reference count. The function returns a raw handle to the original
Python object.
\endrst */
handle release() {
PyObject *tmp = m_ptr;
m_ptr = nullptr;
return handle(tmp);
}
object &operator=(const object &other) {
// Skip inc_ref and dec_ref if both objects are the same
if (!this->is(other)) {
other.inc_ref();
// Use temporary variable to ensure `*this` remains valid while
// `Py_XDECREF` executes, in case `*this` is accessible from Python.
handle temp(m_ptr);
m_ptr = other.m_ptr;
temp.dec_ref();
}
return *this;
}
object &operator=(object &&other) noexcept {
if (this != &other) {
handle temp(m_ptr);
m_ptr = other.m_ptr;
other.m_ptr = nullptr;
temp.dec_ref();
}
return *this;
}
#define PYBIND11_INPLACE_OP(iop) \
object iop(object_api const &other) { return operator=(handle::iop(other)); }
PYBIND11_INPLACE_OP(operator+=)
PYBIND11_INPLACE_OP(operator-=)
PYBIND11_INPLACE_OP(operator*=)
PYBIND11_INPLACE_OP(operator/=)
PYBIND11_INPLACE_OP(operator|=)
PYBIND11_INPLACE_OP(operator&=)
PYBIND11_INPLACE_OP(operator^=)
PYBIND11_INPLACE_OP(operator<<=)
PYBIND11_INPLACE_OP(operator>>=)
#undef PYBIND11_INPLACE_OP
// Calling cast() on an object lvalue just copies (via handle::cast)
template <typename T>
T cast() const &;
// Calling on an object rvalue does a move, if needed and/or possible
template <typename T>
T cast() &&;
protected:
// Tags for choosing constructors from raw PyObject *
struct borrowed_t {};
struct stolen_t {};
/// @cond BROKEN
template <typename T>
friend T reinterpret_borrow(handle);
template <typename T>
friend T reinterpret_steal(handle);
/// @endcond
public:
// Only accessible from derived classes and the reinterpret_* functions
object(handle h, borrowed_t) : handle(h) { inc_ref(); }
object(handle h, stolen_t) : handle(h) {}
};
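// Illustrative sketch: `object` decrefs automatically on destruction, while
// `release()` hands ownership back as a raw `handle` (`make_value` is a
// hypothetical factory returning a py::object):
//
//     {
//         py::object o = make_value();  // owns one reference
//         py::handle raw = o.release(); // `o` is now empty
//         raw.dec_ref();                // caller must balance the reference
//     }                                 // ~object() has nothing left to do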
/** \rst
Declare that a `handle` or ``PyObject *`` is a certain type and borrow the reference.
The target type ``T`` must be `object` or one of its derived classes. The function
doesn't do any conversions or checks. It's up to the user to make sure that the
target type is correct.
.. code-block:: cpp
PyObject *p = PyList_GetItem(obj, index);
py::object o = reinterpret_borrow<py::object>(p);
// or
py::tuple t = reinterpret_borrow<py::tuple>(p); // <-- `p` must already be a `tuple`
\endrst */
template <typename T>
T reinterpret_borrow(handle h) {
return {h, object::borrowed_t{}};
}
/** \rst
Like `reinterpret_borrow`, but steals the reference.
.. code-block:: cpp
PyObject *p = PyObject_Str(obj);
py::str s = reinterpret_steal<py::str>(p); // <-- `p` must already be a `str`
\endrst */
template <typename T>
T reinterpret_steal(handle h) {
return {h, object::stolen_t{}};
}
PYBIND11_NAMESPACE_BEGIN(detail)
// Equivalent to obj.__class__.__name__ (or obj.__name__ if obj is a class).
inline const char *obj_class_name(PyObject *obj) {
if (PyType_Check(obj)) {
return reinterpret_cast<PyTypeObject *>(obj)->tp_name;
}
return Py_TYPE(obj)->tp_name;
}
std::string error_string();
// The code in this struct is very unusual, to minimize the chances of
// masking bugs (elsewhere) by errors during the error handling (here).
// This is meant to be a lifeline for troubleshooting long-running processes
// that crash under conditions that are virtually impossible to reproduce.
// Low-level implementation alternatives are preferred to higher-level ones
// that might raise cascading exceptions. Last-ditch-kind-of attempts are made
// to report as much of the original error as possible, even if there are
// secondary issues obtaining some of the details.
struct error_fetch_and_normalize {
// This comment only applies to Python <= 3.11:
// Immediate normalization is long-established behavior (starting with
// https://github.com/pybind/pybind11/commit/135ba8deafb8bf64a15b24d1513899eb600e2011
// from Sep 2016) and safest. Normalization could be deferred, but this could mask
// errors elsewhere, the performance gain is very minor in typical situations
// (usually the dominant bottleneck is EH unwinding), and the implementation here
// would be more complex.
// Starting with Python 3.12, PyErr_Fetch() normalizes exceptions immediately.
// Any errors during normalization are tracked under __notes__.
explicit error_fetch_and_normalize(const char *called) {
PyErr_Fetch(&m_type.ptr(), &m_value.ptr(), &m_trace.ptr());
if (!m_type) {
pybind11_fail("Internal error: " + std::string(called)
+ " called while "
"Python error indicator not set.");
}
const char *exc_type_name_orig = detail::obj_class_name(m_type.ptr());
if (exc_type_name_orig == nullptr) {
pybind11_fail("Internal error: " + std::string(called)
+ " failed to obtain the name "
"of the original active exception type.");
}
m_lazy_error_string = exc_type_name_orig;
#if PY_VERSION_HEX >= 0x030C0000
// The presence of __notes__ is likely due to exception normalization
// errors, although that is not necessarily true, so only a hint is
// inserted here:
if (PyObject_HasAttrString(m_value.ptr(), "__notes__")) {
m_lazy_error_string += "[WITH __notes__]";
}
#else
// PyErr_NormalizeException() may change the exception type if there are cascading
// failures. This can potentially be extremely confusing.
PyErr_NormalizeException(&m_type.ptr(), &m_value.ptr(), &m_trace.ptr());
if (m_type.ptr() == nullptr) {
pybind11_fail("Internal error: " + std::string(called)
+ " failed to normalize the "
"active exception.");
}
const char *exc_type_name_norm = detail::obj_class_name(m_type.ptr());
if (exc_type_name_norm == nullptr) {
pybind11_fail("Internal error: " + std::string(called)
+ " failed to obtain the name "
"of the normalized active exception type.");
}
# if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x07030a00
// This behavior runs the risk of masking errors in the error handling, but avoids a
// conflict with PyPy, which relies on the normalization here to change OSError to
// FileNotFoundError (https://github.com/pybind/pybind11/issues/4075).
m_lazy_error_string = exc_type_name_norm;
# else
if (exc_type_name_norm != m_lazy_error_string) {
std::string msg = std::string(called)
+ ": MISMATCH of original and normalized "
"active exception types: ";
msg += "ORIGINAL ";
msg += m_lazy_error_string;
msg += " REPLACED BY ";
msg += exc_type_name_norm;
msg += ": " + format_value_and_trace();
pybind11_fail(msg);
}
# endif
#endif
}
error_fetch_and_normalize(const error_fetch_and_normalize &) = delete;
error_fetch_and_normalize(error_fetch_and_normalize &&) = delete;
std::string format_value_and_trace() const {
std::string result;
std::string message_error_string;
if (m_value) {
auto value_str = reinterpret_steal<object>(PyObject_Str(m_value.ptr()));
constexpr const char *message_unavailable_exc
= "<MESSAGE UNAVAILABLE DUE TO ANOTHER EXCEPTION>";
if (!value_str) {
message_error_string = detail::error_string();
result = message_unavailable_exc;
} else {
// Not using `value_str.cast<std::string>()`, to not potentially throw a secondary
// error_already_set that will then result in process termination (#4288).
auto value_bytes = reinterpret_steal<object>(
PyUnicode_AsEncodedString(value_str.ptr(), "utf-8", "backslashreplace"));
if (!value_bytes) {
message_error_string = detail::error_string();
result = message_unavailable_exc;
} else {
char *buffer = nullptr;
Py_ssize_t length = 0;
if (PyBytes_AsStringAndSize(value_bytes.ptr(), &buffer, &length) == -1) {
message_error_string = detail::error_string();
result = message_unavailable_exc;
} else {
result = std::string(buffer, static_cast<std::size_t>(length));
}
}
}
#if PY_VERSION_HEX >= 0x030B0000
auto notes
= reinterpret_steal<object>(PyObject_GetAttrString(m_value.ptr(), "__notes__"));
if (!notes) {
PyErr_Clear(); // No notes is good news.
} else {
auto len_notes = PyList_Size(notes.ptr());
if (len_notes < 0) {
result += "\nFAILURE obtaining len(__notes__): " + detail::error_string();
} else {
result += "\n__notes__ (len=" + std::to_string(len_notes) + "):";
for (ssize_t i = 0; i < len_notes; i++) {
PyObject *note = PyList_GET_ITEM(notes.ptr(), i);
auto note_bytes = reinterpret_steal<object>(
PyUnicode_AsEncodedString(note, "utf-8", "backslashreplace"));
if (!note_bytes) {
result += "\nFAILURE obtaining __notes__[" + std::to_string(i)
+ "]: " + detail::error_string();
} else {
char *buffer = nullptr;
Py_ssize_t length = 0;
if (PyBytes_AsStringAndSize(note_bytes.ptr(), &buffer, &length)
== -1) {
result += "\nFAILURE formatting __notes__[" + std::to_string(i)
+ "]: " + detail::error_string();
} else {
result += '\n';
result += std::string(buffer, static_cast<std::size_t>(length));
}
}
}
}
}
#endif
} else {
result = "<MESSAGE UNAVAILABLE>";
}
if (result.empty()) {
result = "<EMPTY MESSAGE>";
}
bool have_trace = false;
if (m_trace) {
#if !defined(PYPY_VERSION)
auto *tb = reinterpret_cast<PyTracebackObject *>(m_trace.ptr());
// Get the deepest trace possible.
while (tb->tb_next) {
tb = tb->tb_next;
}
PyFrameObject *frame = tb->tb_frame;
Py_XINCREF(frame);
result += "\n\nAt:\n";
while (frame) {
# if PY_VERSION_HEX >= 0x030900B1
PyCodeObject *f_code = PyFrame_GetCode(frame);
# else
PyCodeObject *f_code = frame->f_code;
Py_INCREF(f_code);
# endif
int lineno = PyFrame_GetLineNumber(frame);
result += " ";
result += handle(f_code->co_filename).cast<std::string>();
result += '(';
result += std::to_string(lineno);
result += "): ";
result += handle(f_code->co_name).cast<std::string>();
result += '\n';
Py_DECREF(f_code);
# if PY_VERSION_HEX >= 0x030900B1
auto *b_frame = PyFrame_GetBack(frame);
# else
auto *b_frame = frame->f_back;
Py_XINCREF(b_frame);
# endif
Py_DECREF(frame);
frame = b_frame;
}
have_trace = true;
#endif //! defined(PYPY_VERSION)
}
if (!message_error_string.empty()) {
if (!have_trace) {
result += '\n';
}
result += "\nMESSAGE UNAVAILABLE DUE TO EXCEPTION: " + message_error_string;
}
return result;
}
std::string const &error_string() const {
if (!m_lazy_error_string_completed) {
m_lazy_error_string += ": " + format_value_and_trace();
m_lazy_error_string_completed = true;
}
return m_lazy_error_string;
}
void restore() {
if (m_restore_called) {
pybind11_fail("Internal error: pybind11::detail::error_fetch_and_normalize::restore() "
"called a second time. ORIGINAL ERROR: "
+ error_string());
}
PyErr_Restore(m_type.inc_ref().ptr(), m_value.inc_ref().ptr(), m_trace.inc_ref().ptr());
m_restore_called = true;
}
bool matches(handle exc) const {
return (PyErr_GivenExceptionMatches(m_type.ptr(), exc.ptr()) != 0);
}
// Not protecting these for simplicity.
object m_type, m_value, m_trace;
private:
// Only protecting invariants.
mutable std::string m_lazy_error_string;
mutable bool m_lazy_error_string_completed = false;
mutable bool m_restore_called = false;
};
inline std::string error_string() {
return error_fetch_and_normalize("pybind11::detail::error_string").error_string();
}
PYBIND11_NAMESPACE_END(detail)
/// Fetch and hold an error which was already set in Python. An instance of this is typically
/// thrown to propagate Python-side errors back through C++; it can either be caught manually
/// or allowed to fall back to the function dispatcher (which then raises the captured error
/// back to Python).
class PYBIND11_EXPORT_EXCEPTION error_already_set : public std::exception {
public:
/// Fetches the current Python exception (using PyErr_Fetch()), which will clear the
/// current Python error indicator.
error_already_set()
: m_fetched_error{new detail::error_fetch_and_normalize("pybind11::error_already_set"),
m_fetched_error_deleter} {}
/// The what() result is built lazily on demand.
/// WARNING: This member function needs to acquire the Python GIL. This can lead to
/// crashes (undefined behavior) if the Python interpreter is finalizing.
const char *what() const noexcept override;
/// Restores the currently-held Python error (which will clear the Python error indicator first
/// if already set).
/// NOTE: This member function will always restore the normalized exception, which may or may
/// not be the original Python exception.
/// WARNING: The GIL must be held when this member function is called!
void restore() { m_fetched_error->restore(); }
/// If it is impossible to raise the currently-held error, such as in a destructor, we can
/// write it out using Python's unraisable hook (`sys.unraisablehook`). The error context
/// should be some object whose `repr()` helps identify the location of the error. Python
/// already knows the type and value of the error, so there is no need to repeat that.
void discard_as_unraisable(object err_context) {
restore();
PyErr_WriteUnraisable(err_context.ptr());
}
/// An alternate version of `discard_as_unraisable()`, where a string provides information on
/// the location of the error. For example, `__func__` could be helpful.
/// WARNING: The GIL must be held when this member function is called!
void discard_as_unraisable(const char *err_context) {
discard_as_unraisable(reinterpret_steal<object>(PYBIND11_FROM_STRING(err_context)));
}
// Does nothing; provided for backwards compatibility.
PYBIND11_DEPRECATED("Use of error_already_set.clear() is deprecated")
void clear() {}
/// Check if the currently trapped error type matches the given Python exception class (or a
/// subclass thereof). May also be passed a tuple to search for any exception class matches in
/// the given tuple.
bool matches(handle exc) const { return m_fetched_error->matches(exc); }
const object &type() const { return m_fetched_error->m_type; }
const object &value() const { return m_fetched_error->m_value; }
const object &trace() const { return m_fetched_error->m_trace; }
private:
std::shared_ptr<detail::error_fetch_and_normalize> m_fetched_error;
/// WARNING: This custom deleter needs to acquire the Python GIL. This can lead to
/// crashes (undefined behavior) if the Python interpreter is finalizing.
static void m_fetched_error_deleter(detail::error_fetch_and_normalize *raw_ptr);
};
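// Illustrative sketch of the usual pattern for trapping a Python-side
// failure in C++ and inspecting it (assumes the `py` namespace alias):
//
//     try {
//         py::module_::import("does.not.exist");
//     } catch (py::error_already_set &e) {
//         if (e.matches(PyExc_ImportError)) {
//             // handle/log; e.what() formats type, message and traceback
//         } else {
//             throw; // re-raise anything unexpected
//         }
//     }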
/// Replaces the current Python error indicator with the chosen error, performing a
/// 'raise from' to indicate that the chosen error was caused by the original error.
inline void raise_from(PyObject *type, const char *message) {
// Based on _PyErr_FormatVFromCause:
// https://github.com/python/cpython/blob/467ab194fc6189d9f7310c89937c51abeac56839/Python/errors.c#L405
// See https://github.com/pybind/pybind11/pull/2112 for details.
PyObject *exc = nullptr, *val = nullptr, *val2 = nullptr, *tb = nullptr;
assert(PyErr_Occurred());
PyErr_Fetch(&exc, &val, &tb);
PyErr_NormalizeException(&exc, &val, &tb);
if (tb != nullptr) {
PyException_SetTraceback(val, tb);
Py_DECREF(tb);
}
Py_DECREF(exc);
assert(!PyErr_Occurred());
PyErr_SetString(type, message);
PyErr_Fetch(&exc, &val2, &tb);
PyErr_NormalizeException(&exc, &val2, &tb);
Py_INCREF(val);
PyException_SetCause(val2, val);
PyException_SetContext(val2, val);
PyErr_Restore(exc, val2, tb);
}
/// Sets the current Python error indicator with the chosen error, performing a 'raise from'
/// from the error contained in error_already_set to indicate that the chosen error was
/// caused by the original error.
inline void raise_from(error_already_set &err, PyObject *type, const char *message) {
err.restore();
raise_from(type, message);
}
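// Illustrative sketch of the intended usage (mirrors the pattern from the
// pybind11 documentation): translate a caught Python error into a new one
// while preserving the `__cause__` chain, then rethrow.
//
//     try {
//         py::module_::import("numpy");
//     } catch (py::error_already_set &e) {
//         py::raise_from(e, PyExc_ImportError, "numpy is required");
//         throw py::error_already_set(); // fetches the freshly-set error
//     }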
/** \defgroup python_builtins const_name
Unless stated otherwise, the following C++ functions behave the same
as their Python counterparts.
*/
/** \ingroup python_builtins
\rst
Return true if ``obj`` is an instance of ``T``. Type ``T`` must be a subclass of
`object` or a class which was exposed to Python as ``py::class_<T>``.
\endrst */
template <typename T, detail::enable_if_t<std::is_base_of<object, T>::value, int> = 0>
bool isinstance(handle obj) {
return T::check_(obj);
}
template <typename T, detail::enable_if_t<!std::is_base_of<object, T>::value, int> = 0>
bool isinstance(handle obj) {
return detail::isinstance_generic(obj, typeid(T));
}
template <>
inline bool isinstance<handle>(handle) = delete;
template <>
inline bool isinstance<object>(handle obj) {
return obj.ptr() != nullptr;
}
/// \ingroup python_builtins
/// Return true if ``obj`` is an instance of the ``type``.
inline bool isinstance(handle obj, handle type) {
const auto result = PyObject_IsInstance(obj.ptr(), type.ptr());
if (result == -1) {
throw error_already_set();
}
return result != 0;
}
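// Illustrative sketch of both overloads on a hypothetical `obj`:
//
//     if (py::isinstance<py::list>(obj)) { /* type known at compile time */ }
//     py::object ndarray = py::module_::import("numpy").attr("ndarray");
//     if (py::isinstance(obj, ndarray)) { /* Python-level isinstance() */ }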
/// \addtogroup python_builtins
/// @{
inline bool hasattr(handle obj, handle name) {
return PyObject_HasAttr(obj.ptr(), name.ptr()) == 1;
}
inline bool hasattr(handle obj, const char *name) {
return PyObject_HasAttrString(obj.ptr(), name) == 1;
}
inline void delattr(handle obj, handle name) {
if (PyObject_DelAttr(obj.ptr(), name.ptr()) != 0) {
throw error_already_set();
}
}
inline void delattr(handle obj, const char *name) {
if (PyObject_DelAttrString(obj.ptr(), name) != 0) {
throw error_already_set();
}
}
inline object getattr(handle obj, handle name) {
PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr());
if (!result) {
throw error_already_set();
}
return reinterpret_steal<object>(result);
}
inline object getattr(handle obj, const char *name) {
PyObject *result = PyObject_GetAttrString(obj.ptr(), name);
if (!result) {
throw error_already_set();
}
return reinterpret_steal<object>(result);
}
inline object getattr(handle obj, handle name, handle default_) {
if (PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr())) {
return reinterpret_steal<object>(result);
}
PyErr_Clear();
return reinterpret_borrow<object>(default_);
}
inline object getattr(handle obj, const char *name, handle default_) {
if (PyObject *result = PyObject_GetAttrString(obj.ptr(), name)) {
return reinterpret_steal<object>(result);
}
PyErr_Clear();
return reinterpret_borrow<object>(default_);
}
inline void setattr(handle obj, handle name, handle value) {
if (PyObject_SetAttr(obj.ptr(), name.ptr(), value.ptr()) != 0) {
throw error_already_set();
}
}
inline void setattr(handle obj, const char *name, handle value) {
if (PyObject_SetAttrString(obj.ptr(), name, value.ptr()) != 0) {
throw error_already_set();
}
}
inline ssize_t hash(handle obj) {
auto h = PyObject_Hash(obj.ptr());
if (h == -1) {
throw error_already_set();
}
return h;
}
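// Illustrative sketch of the attribute helpers above (hypothetical `obj`):
//
//     if (py::hasattr(obj, "name")) {
//         py::object n = py::getattr(obj, "name");
//         py::setattr(obj, "name", py::str("renamed"));
//     }
//     py::object fallback = py::getattr(obj, "missing", py::none());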
/// @} python_builtins
PYBIND11_NAMESPACE_BEGIN(detail)
inline handle get_function(handle value) {
if (value) {
if (PyInstanceMethod_Check(value.ptr())) {
value = PyInstanceMethod_GET_FUNCTION(value.ptr());
} else if (PyMethod_Check(value.ptr())) {
value = PyMethod_GET_FUNCTION(value.ptr());
}
}
return value;
}
// Reimplementation of python's dict helper functions to ensure that exceptions
// aren't swallowed (see #2862)
// copied from cpython _PyDict_GetItemStringWithError
inline PyObject *dict_getitemstring(PyObject *v, const char *key) {
PyObject *kv = nullptr, *rv = nullptr;
kv = PyUnicode_FromString(key);
if (kv == nullptr) {
throw error_already_set();
}
rv = PyDict_GetItemWithError(v, kv);
Py_DECREF(kv);
if (rv == nullptr && PyErr_Occurred()) {
throw error_already_set();
}
return rv;
}
inline PyObject *dict_getitem(PyObject *v, PyObject *key) {
PyObject *rv = PyDict_GetItemWithError(v, key);
if (rv == nullptr && PyErr_Occurred()) {
throw error_already_set();
}
return rv;
}
inline PyObject *dict_getitemstringref(PyObject *v, const char *key) {
#if PY_VERSION_HEX >= 0x030D0000
PyObject *rv;
if (PyDict_GetItemStringRef(v, key, &rv) < 0) {
throw error_already_set();
}
return rv;
#else
PyObject *rv = dict_getitemstring(v, key);
if (rv == nullptr && PyErr_Occurred()) {
throw error_already_set();
}
Py_XINCREF(rv);
return rv;
#endif
}
// Helper aliases/functions to support implicit casting of values given to Python
// accessors/methods. When given a pyobject, this simply returns the pyobject as-is; for any
// other C++ type, the value goes through pybind11::cast(obj) to convert it to an `object`.
template <typename T, enable_if_t<is_pyobject<T>::value, int> = 0>
auto object_or_cast(T &&o) -> decltype(std::forward<T>(o)) {
return std::forward<T>(o);
}
// The following casting version is implemented in cast.h:
template <typename T, enable_if_t<!is_pyobject<T>::value, int> = 0>
object object_or_cast(T &&o);
// Match a PyObject*, which we want to convert directly to handle via its converting constructor
inline handle object_or_cast(PyObject *ptr) { return ptr; }
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4522) // warning C4522: multiple assignment operators specified
template <typename Policy>
class accessor : public object_api<accessor<Policy>> {
using key_type = typename Policy::key_type;
public:
accessor(handle obj, key_type key) : obj(obj), key(std::move(key)) {}
accessor(const accessor &) = default;
accessor(accessor &&) noexcept = default;
// accessor overload required to override default assignment operator (templates are not
// allowed to replace default compiler-generated assignments).
void operator=(const accessor &a) && { std::move(*this).operator=(handle(a)); }
void operator=(const accessor &a) & { operator=(handle(a)); }
template <typename T>
void operator=(T &&value) && {
Policy::set(obj, key, object_or_cast(std::forward<T>(value)));
}
template <typename T>
void operator=(T &&value) & {
get_cache() = ensure_object(object_or_cast(std::forward<T>(value)));
}
template <typename T = Policy>
PYBIND11_DEPRECATED(
"Use of obj.attr(...) as bool is deprecated in favor of pybind11::hasattr(obj, ...)")
explicit
operator enable_if_t<std::is_same<T, accessor_policies::str_attr>::value
|| std::is_same<T, accessor_policies::obj_attr>::value,
bool>() const {
return hasattr(obj, key);
}
template <typename T = Policy>
PYBIND11_DEPRECATED("Use of obj[key] as bool is deprecated in favor of obj.contains(key)")
explicit
operator enable_if_t<std::is_same<T, accessor_policies::generic_item>::value, bool>() const {
return obj.contains(key);
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator object() const { return get_cache(); }
PyObject *ptr() const { return get_cache().ptr(); }
template <typename T>
T cast() const {
return get_cache().template cast<T>();
}
private:
static object ensure_object(object &&o) { return std::move(o); }
static object ensure_object(handle h) { return reinterpret_borrow<object>(h); }
object &get_cache() const {
if (!cache) {
cache = Policy::get(obj, key);
}
return cache;
}
private:
handle obj;
key_type key;
mutable object cache;
};
PYBIND11_WARNING_POP
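// Illustrative sketch: accessors defer the actual getattr/setattr or
// __getitem__/__setitem__ call until they are assigned or converted
// (hypothetical `obj` and dict-like `d`):
//
//     obj.attr("counter") = 1;                  // setattr, str_attr policy
//     int c = obj.attr("counter").cast<int>();  // getattr + cast
//     d["key"] = "value";                       // __setitem__, generic_item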
PYBIND11_NAMESPACE_BEGIN(accessor_policies)
struct obj_attr {
using key_type = object;
static object get(handle obj, handle key) { return getattr(obj, key); }
static void set(handle obj, handle key, handle val) { setattr(obj, key, val); }
};
struct str_attr {
using key_type = const char *;
static object get(handle obj, const char *key) { return getattr(obj, key); }
static void set(handle obj, const char *key, handle val) { setattr(obj, key, val); }
};
struct generic_item {
using key_type = object;
static object get(handle obj, handle key) {
PyObject *result = PyObject_GetItem(obj.ptr(), key.ptr());
if (!result) {
throw error_already_set();
}
return reinterpret_steal<object>(result);
}
static void set(handle obj, handle key, handle val) {
if (PyObject_SetItem(obj.ptr(), key.ptr(), val.ptr()) != 0) {
throw error_already_set();
}
}
};
struct sequence_item {
using key_type = size_t;
template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
static object get(handle obj, const IdxType &index) {
PyObject *result = PySequence_GetItem(obj.ptr(), ssize_t_cast(index));
if (!result) {
throw error_already_set();
}
return reinterpret_steal<object>(result);
}
template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
static void set(handle obj, const IdxType &index, handle val) {
// PySequence_SetItem does not steal a reference to 'val'
if (PySequence_SetItem(obj.ptr(), ssize_t_cast(index), val.ptr()) != 0) {
throw error_already_set();
}
}
};
struct list_item {
using key_type = size_t;
template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
static object get(handle obj, const IdxType &index) {
PyObject *result = PyList_GetItem(obj.ptr(), ssize_t_cast(index));
if (!result) {
throw error_already_set();
}
return reinterpret_borrow<object>(result);
}
template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
static void set(handle obj, const IdxType &index, handle val) {
// PyList_SetItem steals a reference to 'val'
if (PyList_SetItem(obj.ptr(), ssize_t_cast(index), val.inc_ref().ptr()) != 0) {
throw error_already_set();
}
}
};
struct tuple_item {
using key_type = size_t;
template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
static object get(handle obj, const IdxType &index) {
PyObject *result = PyTuple_GetItem(obj.ptr(), ssize_t_cast(index));
if (!result) {
throw error_already_set();
}
return reinterpret_borrow<object>(result);
}
template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
static void set(handle obj, const IdxType &index, handle val) {
// PyTuple_SetItem steals a reference to 'val'
if (PyTuple_SetItem(obj.ptr(), ssize_t_cast(index), val.inc_ref().ptr()) != 0) {
throw error_already_set();
}
}
};
PYBIND11_NAMESPACE_END(accessor_policies)
/// STL iterator template used for tuple, list, sequence and dict
template <typename Policy>
class generic_iterator : public Policy {
using It = generic_iterator;
public:
using difference_type = ssize_t;
using iterator_category = typename Policy::iterator_category;
using value_type = typename Policy::value_type;
using reference = typename Policy::reference;
using pointer = typename Policy::pointer;
generic_iterator() = default;
generic_iterator(handle seq, ssize_t index) : Policy(seq, index) {}
// NOLINTNEXTLINE(readability-const-return-type) // PR #3263
reference operator*() const { return Policy::dereference(); }
// NOLINTNEXTLINE(readability-const-return-type) // PR #3263
reference operator[](difference_type n) const { return *(*this + n); }
pointer operator->() const { return **this; }
It &operator++() {
Policy::increment();
return *this;
}
It operator++(int) {
auto copy = *this;
Policy::increment();
return copy;
}
It &operator--() {
Policy::decrement();
return *this;
}
It operator--(int) {
auto copy = *this;
Policy::decrement();
return copy;
}
It &operator+=(difference_type n) {
Policy::advance(n);
return *this;
}
It &operator-=(difference_type n) {
Policy::advance(-n);
return *this;
}
friend It operator+(const It &a, difference_type n) {
auto copy = a;
return copy += n;
}
friend It operator+(difference_type n, const It &b) { return b + n; }
friend It operator-(const It &a, difference_type n) {
auto copy = a;
return copy -= n;
}
friend difference_type operator-(const It &a, const It &b) { return a.distance_to(b); }
friend bool operator==(const It &a, const It &b) { return a.equal(b); }
friend bool operator!=(const It &a, const It &b) { return !(a == b); }
friend bool operator<(const It &a, const It &b) { return b - a > 0; }
friend bool operator>(const It &a, const It &b) { return b < a; }
friend bool operator>=(const It &a, const It &b) { return !(a < b); }
friend bool operator<=(const It &a, const It &b) { return !(a > b); }
};
PYBIND11_NAMESPACE_BEGIN(iterator_policies)
/// Quick proxy class needed to implement ``operator->`` for iterators which can't return pointers
template <typename T>
struct arrow_proxy {
T value;
// NOLINTNEXTLINE(google-explicit-constructor)
arrow_proxy(T &&value) noexcept : value(std::move(value)) {}
T *operator->() const { return &value; }
};
/// Lightweight iterator policy using just a simple pointer: see ``PySequence_Fast_ITEMS``
class sequence_fast_readonly {
protected:
using iterator_category = std::random_access_iterator_tag;
using value_type = handle;
using reference = const handle; // PR #3263
using pointer = arrow_proxy<const handle>;
sequence_fast_readonly(handle obj, ssize_t n) : ptr(PySequence_Fast_ITEMS(obj.ptr()) + n) {}
sequence_fast_readonly() = default;
// NOLINTNEXTLINE(readability-const-return-type) // PR #3263
reference dereference() const { return *ptr; }
void increment() { ++ptr; }
void decrement() { --ptr; }
void advance(ssize_t n) { ptr += n; }
bool equal(const sequence_fast_readonly &b) const { return ptr == b.ptr; }
ssize_t distance_to(const sequence_fast_readonly &b) const { return ptr - b.ptr; }
private:
PyObject **ptr;
};
/// Full read and write access using the sequence protocol: see ``detail::sequence_accessor``
class sequence_slow_readwrite {
protected:
using iterator_category = std::random_access_iterator_tag;
using value_type = object;
using reference = sequence_accessor;
using pointer = arrow_proxy<const sequence_accessor>;
sequence_slow_readwrite(handle obj, ssize_t index) : obj(obj), index(index) {}
sequence_slow_readwrite() = default;
reference dereference() const { return {obj, static_cast<size_t>(index)}; }
void increment() { ++index; }
void decrement() { --index; }
void advance(ssize_t n) { index += n; }
bool equal(const sequence_slow_readwrite &b) const { return index == b.index; }
ssize_t distance_to(const sequence_slow_readwrite &b) const { return index - b.index; }
private:
handle obj;
ssize_t index;
};
/// Python's dictionary protocol permits this to be a forward iterator
class dict_readonly {
protected:
using iterator_category = std::forward_iterator_tag;
using value_type = std::pair<handle, handle>;
using reference = const value_type; // PR #3263
using pointer = arrow_proxy<const value_type>;
dict_readonly() = default;
dict_readonly(handle obj, ssize_t pos) : obj(obj), pos(pos) { increment(); }
// NOLINTNEXTLINE(readability-const-return-type) // PR #3263
reference dereference() const { return {key, value}; }
void increment() {
if (PyDict_Next(obj.ptr(), &pos, &key, &value) == 0) {
pos = -1;
}
}
bool equal(const dict_readonly &b) const { return pos == b.pos; }
private:
handle obj;
PyObject *key = nullptr, *value = nullptr;
ssize_t pos = -1;
};
PYBIND11_NAMESPACE_END(iterator_policies)
#if !defined(PYPY_VERSION)
using tuple_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;
using list_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;
#else
using tuple_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
using list_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
#endif
using sequence_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
using dict_iterator = generic_iterator<iterator_policies::dict_readonly>;
inline bool PyIterable_Check(PyObject *obj) {
PyObject *iter = PyObject_GetIter(obj);
if (iter) {
Py_DECREF(iter);
return true;
}
PyErr_Clear();
return false;
}
inline bool PyNone_Check(PyObject *o) { return o == Py_None; }
inline bool PyEllipsis_Check(PyObject *o) { return o == Py_Ellipsis; }
#ifdef PYBIND11_STR_LEGACY_PERMISSIVE
inline bool PyUnicode_Check_Permissive(PyObject *o) {
return PyUnicode_Check(o) || PYBIND11_BYTES_CHECK(o);
}
# define PYBIND11_STR_CHECK_FUN detail::PyUnicode_Check_Permissive
#else
# define PYBIND11_STR_CHECK_FUN PyUnicode_Check
#endif
inline bool PyStaticMethod_Check(PyObject *o) { return o->ob_type == &PyStaticMethod_Type; }
class kwargs_proxy : public handle {
public:
explicit kwargs_proxy(handle h) : handle(h) {}
};
class args_proxy : public handle {
public:
explicit args_proxy(handle h) : handle(h) {}
kwargs_proxy operator*() const { return kwargs_proxy(*this); }
};
/// Python argument categories (using PEP 448 terms)
template <typename T>
using is_keyword = std::is_base_of<arg, T>;
template <typename T>
using is_s_unpacking = std::is_same<args_proxy, T>; // * unpacking
template <typename T>
using is_ds_unpacking = std::is_same<kwargs_proxy, T>; // ** unpacking
template <typename T>
using is_positional = satisfies_none_of<T, is_keyword, is_s_unpacking, is_ds_unpacking>;
template <typename T>
using is_keyword_or_ds = satisfies_any_of<T, is_keyword, is_ds_unpacking>;
// Call argument collector forward declarations
template <return_value_policy policy = return_value_policy::automatic_reference>
class simple_collector;
template <return_value_policy policy = return_value_policy::automatic_reference>
class unpacking_collector;
PYBIND11_NAMESPACE_END(detail)
// TODO: After the deprecated constructors are removed, this macro can be simplified by
// inheriting ctors: `using Parent::Parent`. That is not an option right now because
// the `using` statement triggers the parent deprecation warning even if the ctor
// isn't used.
#define PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
public: \
PYBIND11_DEPRECATED("Use reinterpret_borrow<" #Name ">() or reinterpret_steal<" #Name ">()") \
Name(handle h, bool is_borrowed) \
: Parent(is_borrowed ? Parent(h, borrowed_t{}) : Parent(h, stolen_t{})) {} \
Name(handle h, borrowed_t) : Parent(h, borrowed_t{}) {} \
Name(handle h, stolen_t) : Parent(h, stolen_t{}) {} \
PYBIND11_DEPRECATED("Use py::isinstance<py::python_type>(obj) instead") \
bool check() const { return m_ptr != nullptr && (CheckFun(m_ptr) != 0); } \
static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); } \
template <typename Policy_> /* NOLINTNEXTLINE(google-explicit-constructor) */ \
Name(const ::pybind11::detail::accessor<Policy_> &a) : Name(object(a)) {}
#define PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun) \
PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
/* This is deliberately not 'explicit' to allow implicit conversion from object: */ \
/* NOLINTNEXTLINE(google-explicit-constructor) */ \
Name(const object &o) \
: Parent(check_(o) ? o.inc_ref().ptr() : ConvertFun(o.ptr()), stolen_t{}) { \
if (!m_ptr) \
throw ::pybind11::error_already_set(); \
} \
/* NOLINTNEXTLINE(google-explicit-constructor) */ \
Name(object &&o) : Parent(check_(o) ? o.release().ptr() : ConvertFun(o.ptr()), stolen_t{}) { \
if (!m_ptr) \
throw ::pybind11::error_already_set(); \
}
#define PYBIND11_OBJECT_CVT_DEFAULT(Name, Parent, CheckFun, ConvertFun) \
PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun) \
Name() = default;
#define PYBIND11_OBJECT_CHECK_FAILED(Name, o_ptr) \
::pybind11::type_error("Object of type '" \
+ ::pybind11::detail::get_fully_qualified_tp_name(Py_TYPE(o_ptr)) \
+ "' is not an instance of '" #Name "'")
#define PYBIND11_OBJECT(Name, Parent, CheckFun) \
PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
/* This is deliberately not 'explicit' to allow implicit conversion from object: */ \
/* NOLINTNEXTLINE(google-explicit-constructor) */ \
Name(const object &o) : Parent(o) { \
if (m_ptr && !check_(m_ptr)) \
throw PYBIND11_OBJECT_CHECK_FAILED(Name, m_ptr); \
} \
/* NOLINTNEXTLINE(google-explicit-constructor) */ \
Name(object &&o) : Parent(std::move(o)) { \
if (m_ptr && !check_(m_ptr)) \
throw PYBIND11_OBJECT_CHECK_FAILED(Name, m_ptr); \
}
#define PYBIND11_OBJECT_DEFAULT(Name, Parent, CheckFun) \
PYBIND11_OBJECT(Name, Parent, CheckFun) \
Name() = default;
/// \addtogroup pytypes
/// @{
/** \rst
Wraps a Python iterator so that it can also be used as a C++ input iterator
Caveat: copying an iterator does not (and cannot) clone the internal
state of the Python iterable. This also applies to the post-increment
operator. This iterator should only be used to retrieve the current
value using ``operator*()``.
\endrst */
class iterator : public object {
public:
using iterator_category = std::input_iterator_tag;
using difference_type = ssize_t;
using value_type = handle;
using reference = const handle; // PR #3263
using pointer = const handle *;
PYBIND11_OBJECT_DEFAULT(iterator, object, PyIter_Check)
iterator &operator++() {
advance();
return *this;
}
iterator operator++(int) {
auto rv = *this;
advance();
return rv;
}
// NOLINTNEXTLINE(readability-const-return-type) // PR #3263
reference operator*() const {
if (m_ptr && !value.ptr()) {
auto &self = const_cast<iterator &>(*this);
self.advance();
}
return value;
}
pointer operator->() const {
operator*();
return &value;
}
/** \rst
The value which marks the end of the iteration. ``it == iterator::sentinel()``
is equivalent to catching ``StopIteration`` in Python.
.. code-block:: cpp
void foo(py::iterator it) {
while (it != py::iterator::sentinel()) {
// use `*it`
++it;
}
}
\endrst */
static iterator sentinel() { return {}; }
friend bool operator==(const iterator &a, const iterator &b) { return a->ptr() == b->ptr(); }
friend bool operator!=(const iterator &a, const iterator &b) { return a->ptr() != b->ptr(); }
private:
void advance() {
value = reinterpret_steal<object>(PyIter_Next(m_ptr));
if (value.ptr() == nullptr && PyErr_Occurred()) {
throw error_already_set();
}
}
private:
object value = {};
};
class type : public object {
public:
PYBIND11_OBJECT(type, object, PyType_Check)
/// Return a type handle from a handle or an object
static handle handle_of(handle h) { return handle((PyObject *) Py_TYPE(h.ptr())); }
/// Return a type object from a handle or an object
static type of(handle h) { return type(type::handle_of(h), borrowed_t{}); }
// Defined in pybind11/cast.h
/// Convert C++ type to handle if previously registered. Does not (yet) convert
/// standard types such as int, float, etc.
/// See https://github.com/pybind/pybind11/issues/2486
template <typename T>
static handle handle_of();
/// Convert C++ type to type if previously registered. Does not (yet) convert
/// standard types such as int, float, etc.
/// See https://github.com/pybind/pybind11/issues/2486
template <typename T>
static type of() {
return type(type::handle_of<T>(), borrowed_t{});
}
};
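// Illustrative sketch (hypothetical `obj`):
//
//     py::type t = py::type::of(obj);          // like Python's type(obj)
//     py::handle h = py::type::handle_of(obj); // same, without refcounting
//     assert(t.is(h));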
class iterable : public object {
public:
PYBIND11_OBJECT_DEFAULT(iterable, object, detail::PyIterable_Check)
};
class bytes;
class str : public object {
public:
PYBIND11_OBJECT_CVT(str, object, PYBIND11_STR_CHECK_FUN, raw_str)
template <typename SzType, detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
str(const char *c, const SzType &n)
: object(PyUnicode_FromStringAndSize(c, ssize_t_cast(n)), stolen_t{}) {
if (!m_ptr) {
if (PyErr_Occurred()) {
throw error_already_set();
}
pybind11_fail("Could not allocate string object!");
}
}
// 'explicit' is explicitly omitted from the following constructors to allow implicit
// conversion to py::str from C++ string-like objects
// NOLINTNEXTLINE(google-explicit-constructor)
str(const char *c = "") : object(PyUnicode_FromString(c), stolen_t{}) {
if (!m_ptr) {
if (PyErr_Occurred()) {
throw error_already_set();
}
pybind11_fail("Could not allocate string object!");
}
}
// NOLINTNEXTLINE(google-explicit-constructor)
str(const std::string &s) : str(s.data(), s.size()) {}
#ifdef PYBIND11_HAS_STRING_VIEW
// enable_if is needed to avoid "ambiguous conversion" errors (see PR #3521).
template <typename T, detail::enable_if_t<std::is_same<T, std::string_view>::value, int> = 0>
// NOLINTNEXTLINE(google-explicit-constructor)
str(T s) : str(s.data(), s.size()) {}
# ifdef PYBIND11_HAS_U8STRING
// reinterpret_cast here is safe (C++20 guarantees char8_t has the same size/alignment as char)
// NOLINTNEXTLINE(google-explicit-constructor)
str(std::u8string_view s) : str(reinterpret_cast<const char *>(s.data()), s.size()) {}
# endif
#endif
explicit str(const bytes &b);
/** \rst
Return a string representation of the object. This is analogous to
the ``str()`` function in Python.
\endrst */
explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) {
if (!m_ptr) {
throw error_already_set();
}
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator std::string() const {
object temp = *this;
if (PyUnicode_Check(m_ptr)) {
temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(m_ptr));
if (!temp) {
throw error_already_set();
}
}
char *buffer = nullptr;
ssize_t length = 0;
if (PyBytes_AsStringAndSize(temp.ptr(), &buffer, &length) != 0) {
throw error_already_set();
}
return std::string(buffer, (size_t) length);
}
template <typename... Args>
str format(Args &&...args) const {
return attr("format")(std::forward<Args>(args)...);
}
private:
/// Return string representation -- always returns a new reference, even if already a str
static PyObject *raw_str(PyObject *op) {
PyObject *str_value = PyObject_Str(op);
return str_value;
}
};
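// Illustrative sketch: construction, formatting and conversion round-trip.
//
//     py::str s("x = {}");
//     py::str filled = s.format(42);  // delegates to Python's str.format
//     std::string cpp = filled;       // UTF-8 bytes via operator std::string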
/// @} pytypes
inline namespace literals {
/** \rst
String literal version of `str`
\endrst */
inline str
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 5
operator"" _s // gcc 4.8.5 insists on having a space (hard error).
#else
operator""_s // clang 17 generates a deprecation warning if there is a space.
#endif
(const char *s, size_t size) {
return {s, size};
}
} // namespace literals
/// \addtogroup pytypes
/// @{
class bytes : public object {
public:
PYBIND11_OBJECT(bytes, object, PYBIND11_BYTES_CHECK)
// Allow implicit conversion:
// NOLINTNEXTLINE(google-explicit-constructor)
bytes(const char *c = "") : object(PYBIND11_BYTES_FROM_STRING(c), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate bytes object!");
}
}
template <typename SzType, detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
bytes(const char *c, const SzType &n)
: object(PYBIND11_BYTES_FROM_STRING_AND_SIZE(c, ssize_t_cast(n)), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate bytes object!");
}
}
// Allow implicit conversion:
// NOLINTNEXTLINE(google-explicit-constructor)
bytes(const std::string &s) : bytes(s.data(), s.size()) {}
explicit bytes(const pybind11::str &s);
// NOLINTNEXTLINE(google-explicit-constructor)
operator std::string() const { return string_op<std::string>(); }
#ifdef PYBIND11_HAS_STRING_VIEW
// enable_if is needed to avoid "ambiguous conversion" errors (see PR #3521).
template <typename T, detail::enable_if_t<std::is_same<T, std::string_view>::value, int> = 0>
// NOLINTNEXTLINE(google-explicit-constructor)
bytes(T s) : bytes(s.data(), s.size()) {}
// Obtain a string view of the current `bytes` buffer value. The view is only
// valid for as long as this `bytes` instance remains alive, so it generally
// must not outlive it.
// NOLINTNEXTLINE(google-explicit-constructor)
operator std::string_view() const { return string_op<std::string_view>(); }
#endif
private:
template <typename T>
T string_op() const {
char *buffer = nullptr;
ssize_t length = 0;
if (PyBytes_AsStringAndSize(m_ptr, &buffer, &length) != 0) {
throw error_already_set();
}
return {buffer, static_cast<size_t>(length)};
}
};
// Note: breathe >= 4.17.0 will fail to build docs if the below two constructors
// are included in the doxygen group; close here and reopen after as a workaround
/// @} pytypes
inline bytes::bytes(const pybind11::str &s) {
object temp = s;
if (PyUnicode_Check(s.ptr())) {
temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(s.ptr()));
if (!temp) {
throw error_already_set();
}
}
char *buffer = nullptr;
ssize_t length = 0;
if (PyBytes_AsStringAndSize(temp.ptr(), &buffer, &length) != 0) {
throw error_already_set();
}
auto obj = reinterpret_steal<object>(PYBIND11_BYTES_FROM_STRING_AND_SIZE(buffer, length));
if (!obj) {
pybind11_fail("Could not allocate bytes object!");
}
m_ptr = obj.release().ptr();
}
inline str::str(const bytes &b) {
char *buffer = nullptr;
ssize_t length = 0;
if (PyBytes_AsStringAndSize(b.ptr(), &buffer, &length) != 0) {
throw error_already_set();
}
auto obj = reinterpret_steal<object>(PyUnicode_FromStringAndSize(buffer, length));
if (!obj) {
if (PyErr_Occurred()) {
throw error_already_set();
}
pybind11_fail("Could not allocate string object!");
}
m_ptr = obj.release().ptr();
}
/// \addtogroup pytypes
/// @{
class bytearray : public object {
public:
PYBIND11_OBJECT_CVT(bytearray, object, PyByteArray_Check, PyByteArray_FromObject)
template <typename SzType, detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
bytearray(const char *c, const SzType &n)
: object(PyByteArray_FromStringAndSize(c, ssize_t_cast(n)), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate bytearray object!");
}
}
bytearray() : bytearray("", 0) {}
explicit bytearray(const std::string &s) : bytearray(s.data(), s.size()) {}
size_t size() const { return static_cast<size_t>(PyByteArray_Size(m_ptr)); }
explicit operator std::string() const {
char *buffer = PyByteArray_AS_STRING(m_ptr);
ssize_t size = PyByteArray_GET_SIZE(m_ptr);
return std::string(buffer, static_cast<size_t>(size));
}
};
// Note: breathe >= 4.17.0 will fail to build docs if the below two constructors
// are included in the doxygen group; close here and reopen after as a workaround
/// @} pytypes
/// \addtogroup pytypes
/// @{
class none : public object {
public:
PYBIND11_OBJECT(none, object, detail::PyNone_Check)
none() : object(Py_None, borrowed_t{}) {}
};
class ellipsis : public object {
public:
PYBIND11_OBJECT(ellipsis, object, detail::PyEllipsis_Check)
ellipsis() : object(Py_Ellipsis, borrowed_t{}) {}
};
class bool_ : public object {
public:
PYBIND11_OBJECT_CVT(bool_, object, PyBool_Check, raw_bool)
bool_() : object(Py_False, borrowed_t{}) {}
// Allow implicit conversion from and to `bool`:
// NOLINTNEXTLINE(google-explicit-constructor)
bool_(bool value) : object(value ? Py_True : Py_False, borrowed_t{}) {}
// NOLINTNEXTLINE(google-explicit-constructor)
operator bool() const { return (m_ptr != nullptr) && PyLong_AsLong(m_ptr) != 0; }
private:
/// Return the truth value of an object -- always returns a new reference
static PyObject *raw_bool(PyObject *op) {
const auto value = PyObject_IsTrue(op);
if (value == -1) {
return nullptr;
}
return handle(value != 0 ? Py_True : Py_False).inc_ref().ptr();
}
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Converts a value to the given unsigned type. If an error occurs, you get back (Unsigned) -1;
// otherwise you get back the unsigned long or unsigned long long value cast to (Unsigned).
// (The distinction is critically important when casting a returned -1 error value to some other
// unsigned type: (A)-1 != (B)-1 when A and B are unsigned types of different sizes).
template <typename Unsigned>
Unsigned as_unsigned(PyObject *o) {
if (sizeof(Unsigned) <= sizeof(unsigned long)) {
unsigned long v = PyLong_AsUnsignedLong(o);
return v == (unsigned long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v;
}
unsigned long long v = PyLong_AsUnsignedLongLong(o);
return v == (unsigned long long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v;
}
PYBIND11_NAMESPACE_END(detail)
class int_ : public object {
public:
PYBIND11_OBJECT_CVT(int_, object, PYBIND11_LONG_CHECK, PyNumber_Long)
int_() : object(PyLong_FromLong(0), stolen_t{}) {}
// Allow implicit conversion from C++ integral types:
template <typename T, detail::enable_if_t<std::is_integral<T>::value, int> = 0>
// NOLINTNEXTLINE(google-explicit-constructor)
int_(T value) {
if (sizeof(T) <= sizeof(long)) {
if (std::is_signed<T>::value) {
m_ptr = PyLong_FromLong((long) value);
} else {
m_ptr = PyLong_FromUnsignedLong((unsigned long) value);
}
} else {
if (std::is_signed<T>::value) {
m_ptr = PyLong_FromLongLong((long long) value);
} else {
m_ptr = PyLong_FromUnsignedLongLong((unsigned long long) value);
}
}
if (!m_ptr) {
pybind11_fail("Could not allocate int object!");
}
}
template <typename T, detail::enable_if_t<std::is_integral<T>::value, int> = 0>
// NOLINTNEXTLINE(google-explicit-constructor)
operator T() const {
return std::is_unsigned<T>::value ? detail::as_unsigned<T>(m_ptr)
: sizeof(T) <= sizeof(long) ? (T) PyLong_AsLong(m_ptr)
: (T) PYBIND11_LONG_AS_LONGLONG(m_ptr);
}
};
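// Illustrative usage (not part of the original header): conversion is implicit
// in both directions, e.g.
//   py::int_ n = std::uint64_t{1} << 40;        // C++ integral -> Python int
//   auto back = static_cast<std::uint64_t>(n);  // Python int -> C++ via as_unsigned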
class float_ : public object {
public:
PYBIND11_OBJECT_CVT(float_, object, PyFloat_Check, PyNumber_Float)
// Allow implicit conversion from float/double:
// NOLINTNEXTLINE(google-explicit-constructor)
float_(float value) : object(PyFloat_FromDouble((double) value), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate float object!");
}
}
// NOLINTNEXTLINE(google-explicit-constructor)
float_(double value = .0) : object(PyFloat_FromDouble((double) value), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate float object!");
}
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator float() const { return (float) PyFloat_AsDouble(m_ptr); }
// NOLINTNEXTLINE(google-explicit-constructor)
operator double() const { return (double) PyFloat_AsDouble(m_ptr); }
};
class weakref : public object {
public:
PYBIND11_OBJECT_CVT_DEFAULT(weakref, object, PyWeakref_Check, raw_weakref)
explicit weakref(handle obj, handle callback = {})
: object(PyWeakref_NewRef(obj.ptr(), callback.ptr()), stolen_t{}) {
if (!m_ptr) {
if (PyErr_Occurred()) {
throw error_already_set();
}
pybind11_fail("Could not allocate weak reference!");
}
}
private:
static PyObject *raw_weakref(PyObject *o) { return PyWeakref_NewRef(o, nullptr); }
};
class slice : public object {
public:
PYBIND11_OBJECT_DEFAULT(slice, object, PySlice_Check)
slice(handle start, handle stop, handle step)
: object(PySlice_New(start.ptr(), stop.ptr(), step.ptr()), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate slice object!");
}
}
#ifdef PYBIND11_HAS_OPTIONAL
slice(std::optional<ssize_t> start, std::optional<ssize_t> stop, std::optional<ssize_t> step)
: slice(index_to_object(start), index_to_object(stop), index_to_object(step)) {}
#else
slice(ssize_t start_, ssize_t stop_, ssize_t step_)
: slice(int_(start_), int_(stop_), int_(step_)) {}
#endif
bool
compute(size_t length, size_t *start, size_t *stop, size_t *step, size_t *slicelength) const {
return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr,
(ssize_t) length,
(ssize_t *) start,
(ssize_t *) stop,
(ssize_t *) step,
(ssize_t *) slicelength)
== 0;
}
bool compute(
ssize_t length, ssize_t *start, ssize_t *stop, ssize_t *step, ssize_t *slicelength) const {
return PySlice_GetIndicesEx(
(PYBIND11_SLICE_OBJECT *) m_ptr, length, start, stop, step, slicelength)
== 0;
}
private:
template <typename T>
static object index_to_object(T index) {
return index ? object(int_(*index)) : object(none());
}
};
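// Illustrative usage (not part of the original header): resolving a slice
// against a concrete sequence length, much like Python's slice.indices():
//   py::slice s(0, 10, 2);
//   size_t start = 0, stop = 0, step = 0, slicelength = 0;
//   if (!s.compute(5, &start, &stop, &step, &slicelength))
//       throw py::error_already_set();
//   // -> start == 0, stop == 5, step == 2, slicelength == 3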
class capsule : public object {
public:
PYBIND11_OBJECT_DEFAULT(capsule, object, PyCapsule_CheckExact)
PYBIND11_DEPRECATED("Use reinterpret_borrow<capsule>() or reinterpret_steal<capsule>()")
capsule(PyObject *ptr, bool is_borrowed)
: object(is_borrowed ? object(ptr, borrowed_t{}) : object(ptr, stolen_t{})) {}
explicit capsule(const void *value,
const char *name = nullptr,
PyCapsule_Destructor destructor = nullptr)
: object(PyCapsule_New(const_cast<void *>(value), name, destructor), stolen_t{}) {
if (!m_ptr) {
throw error_already_set();
}
}
PYBIND11_DEPRECATED("Please use the ctor with value, name, destructor args")
capsule(const void *value, PyCapsule_Destructor destructor)
: object(PyCapsule_New(const_cast<void *>(value), nullptr, destructor), stolen_t{}) {
if (!m_ptr) {
throw error_already_set();
}
}
/// Capsule name is nullptr.
capsule(const void *value, void (*destructor)(void *)) {
initialize_with_void_ptr_destructor(value, nullptr, destructor);
}
capsule(const void *value, const char *name, void (*destructor)(void *)) {
initialize_with_void_ptr_destructor(value, name, destructor);
}
explicit capsule(void (*destructor)()) {
m_ptr = PyCapsule_New(reinterpret_cast<void *>(destructor), nullptr, [](PyObject *o) {
const char *name = get_name_in_error_scope(o);
auto destructor = reinterpret_cast<void (*)()>(PyCapsule_GetPointer(o, name));
if (destructor == nullptr) {
throw error_already_set();
}
destructor();
});
if (!m_ptr) {
throw error_already_set();
}
}
template <typename T>
operator T *() const { // NOLINT(google-explicit-constructor)
return get_pointer<T>();
}
/// Get the pointer the capsule holds.
template <typename T = void>
T *get_pointer() const {
const auto *name = this->name();
T *result = static_cast<T *>(PyCapsule_GetPointer(m_ptr, name));
if (!result) {
throw error_already_set();
}
return result;
}
/// Replaces a capsule's pointer *without* calling the destructor on the existing one.
void set_pointer(const void *value) {
if (PyCapsule_SetPointer(m_ptr, const_cast<void *>(value)) != 0) {
throw error_already_set();
}
}
const char *name() const {
const char *name = PyCapsule_GetName(m_ptr);
if ((name == nullptr) && PyErr_Occurred()) {
throw error_already_set();
}
return name;
}
/// Replaces a capsule's name *without* calling the destructor on the existing one.
void set_name(const char *new_name) {
if (PyCapsule_SetName(m_ptr, new_name) != 0) {
throw error_already_set();
}
}
private:
static const char *get_name_in_error_scope(PyObject *o) {
error_scope error_guard;
const char *name = PyCapsule_GetName(o);
if ((name == nullptr) && PyErr_Occurred()) {
// write out and consume error raised by call to PyCapsule_GetName
PyErr_WriteUnraisable(o);
}
return name;
}
void initialize_with_void_ptr_destructor(const void *value,
const char *name,
void (*destructor)(void *)) {
m_ptr = PyCapsule_New(const_cast<void *>(value), name, [](PyObject *o) {
// guard if destructor called while err indicator is set
error_scope error_guard;
auto destructor = reinterpret_cast<void (*)(void *)>(PyCapsule_GetContext(o));
if (destructor == nullptr && PyErr_Occurred()) {
throw error_already_set();
}
const char *name = get_name_in_error_scope(o);
void *ptr = PyCapsule_GetPointer(o, name);
if (ptr == nullptr) {
throw error_already_set();
}
if (destructor != nullptr) {
destructor(ptr);
}
});
if (!m_ptr || PyCapsule_SetContext(m_ptr, reinterpret_cast<void *>(destructor)) != 0) {
throw error_already_set();
}
}
};
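// Illustrative usage (not part of the original header): handing ownership of a
// heap allocation to Python via a capsule destructor:
//   auto *data = new std::vector<double>(100);
//   py::capsule owner(data, [](void *p) { delete static_cast<std::vector<double> *>(p); });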
class tuple : public object {
public:
PYBIND11_OBJECT_CVT(tuple, object, PyTuple_Check, PySequence_Tuple)
template <typename SzType = ssize_t,
detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
// Some compilers generate link errors when using `const SzType &` here:
explicit tuple(SzType size = 0) : object(PyTuple_New(ssize_t_cast(size)), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate tuple object!");
}
}
size_t size() const { return (size_t) PyTuple_Size(m_ptr); }
bool empty() const { return size() == 0; }
detail::tuple_accessor operator[](size_t index) const { return {*this, index}; }
template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
detail::item_accessor operator[](T &&o) const {
return object::operator[](std::forward<T>(o));
}
detail::tuple_iterator begin() const { return {*this, 0}; }
detail::tuple_iterator end() const { return {*this, PyTuple_GET_SIZE(m_ptr)}; }
};
// We need to put this into a separate function because the Intel compiler
// fails to compile enable_if_t<all_of<is_keyword_or_ds<Args>...>::value> part below
// (tested with ICC 2021.1 Beta 20200827).
template <typename... Args>
constexpr bool args_are_all_keyword_or_ds() {
return detail::all_of<detail::is_keyword_or_ds<Args>...>::value;
}
class dict : public object {
public:
PYBIND11_OBJECT_CVT(dict, object, PyDict_Check, raw_dict)
dict() : object(PyDict_New(), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate dict object!");
}
}
template <typename... Args,
typename = detail::enable_if_t<args_are_all_keyword_or_ds<Args...>()>,
// MSVC workaround: it can't compile an out-of-line definition, so defer the
// collector
typename collector = detail::deferred_t<detail::unpacking_collector<>, Args...>>
explicit dict(Args &&...args) : dict(collector(std::forward<Args>(args)...).kwargs()) {}
size_t size() const { return (size_t) PyDict_Size(m_ptr); }
bool empty() const { return size() == 0; }
detail::dict_iterator begin() const { return {*this, 0}; }
detail::dict_iterator end() const { return {}; }
void clear() /* py-non-const */ { PyDict_Clear(ptr()); }
template <typename T>
bool contains(T &&key) const {
auto result = PyDict_Contains(m_ptr, detail::object_or_cast(std::forward<T>(key)).ptr());
if (result == -1) {
throw error_already_set();
}
return result == 1;
}
private:
/// Call the `dict` Python type -- always returns a new reference
static PyObject *raw_dict(PyObject *op) {
if (PyDict_Check(op)) {
return handle(op).inc_ref().ptr();
}
return PyObject_CallFunctionObjArgs((PyObject *) &PyDict_Type, op, nullptr);
}
};
class sequence : public object {
public:
PYBIND11_OBJECT_DEFAULT(sequence, object, PySequence_Check)
size_t size() const {
ssize_t result = PySequence_Size(m_ptr);
if (result == -1) {
throw error_already_set();
}
return (size_t) result;
}
bool empty() const { return size() == 0; }
detail::sequence_accessor operator[](size_t index) const { return {*this, index}; }
template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
detail::item_accessor operator[](T &&o) const {
return object::operator[](std::forward<T>(o));
}
detail::sequence_iterator begin() const { return {*this, 0}; }
detail::sequence_iterator end() const { return {*this, PySequence_Size(m_ptr)}; }
};
class list : public object {
public:
PYBIND11_OBJECT_CVT(list, object, PyList_Check, PySequence_List)
template <typename SzType = ssize_t,
detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
// Some compilers generate link errors when using `const SzType &` here:
explicit list(SzType size = 0) : object(PyList_New(ssize_t_cast(size)), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate list object!");
}
}
size_t size() const { return (size_t) PyList_Size(m_ptr); }
bool empty() const { return size() == 0; }
detail::list_accessor operator[](size_t index) const { return {*this, index}; }
template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
detail::item_accessor operator[](T &&o) const {
return object::operator[](std::forward<T>(o));
}
detail::list_iterator begin() const { return {*this, 0}; }
detail::list_iterator end() const { return {*this, PyList_GET_SIZE(m_ptr)}; }
template <typename T>
void append(T &&val) /* py-non-const */ {
if (PyList_Append(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) != 0) {
throw error_already_set();
}
}
template <typename IdxType,
typename ValType,
detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
void insert(const IdxType &index, ValType &&val) /* py-non-const */ {
if (PyList_Insert(m_ptr,
ssize_t_cast(index),
detail::object_or_cast(std::forward<ValType>(val)).ptr())
!= 0) {
throw error_already_set();
}
}
void clear() /* py-non-const */ {
if (PyList_SetSlice(m_ptr, 0, PyList_Size(m_ptr), nullptr) == -1) {
throw error_already_set();
}
}
};
class args : public tuple {
PYBIND11_OBJECT_DEFAULT(args, tuple, PyTuple_Check)
};
class kwargs : public dict {
PYBIND11_OBJECT_DEFAULT(kwargs, dict, PyDict_Check)
};
class anyset : public object {
public:
PYBIND11_OBJECT(anyset, object, PyAnySet_Check)
size_t size() const { return static_cast<size_t>(PySet_Size(m_ptr)); }
bool empty() const { return size() == 0; }
template <typename T>
bool contains(T &&val) const {
auto result = PySet_Contains(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr());
if (result == -1) {
throw error_already_set();
}
return result == 1;
}
};
class set : public anyset {
public:
PYBIND11_OBJECT_CVT(set, anyset, PySet_Check, PySet_New)
set() : anyset(PySet_New(nullptr), stolen_t{}) {
if (!m_ptr) {
pybind11_fail("Could not allocate set object!");
}
}
template <typename T>
bool add(T &&val) /* py-non-const */ {
return PySet_Add(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) == 0;
}
void clear() /* py-non-const */ { PySet_Clear(m_ptr); }
};
class frozenset : public anyset {
public:
PYBIND11_OBJECT_CVT(frozenset, anyset, PyFrozenSet_Check, PyFrozenSet_New)
};
class function : public object {
public:
PYBIND11_OBJECT_DEFAULT(function, object, PyCallable_Check)
handle cpp_function() const {
handle fun = detail::get_function(m_ptr);
if (fun && PyCFunction_Check(fun.ptr())) {
return fun;
}
return handle();
}
bool is_cpp_function() const { return (bool) cpp_function(); }
};
class staticmethod : public object {
public:
PYBIND11_OBJECT_CVT(staticmethod, object, detail::PyStaticMethod_Check, PyStaticMethod_New)
};
class buffer : public object {
public:
PYBIND11_OBJECT_DEFAULT(buffer, object, PyObject_CheckBuffer)
buffer_info request(bool writable = false) const {
int flags = PyBUF_STRIDES | PyBUF_FORMAT;
if (writable) {
flags |= PyBUF_WRITABLE;
}
auto *view = new Py_buffer();
if (PyObject_GetBuffer(m_ptr, view, flags) != 0) {
delete view;
throw error_already_set();
}
return buffer_info(view);
}
};
class memoryview : public object {
public:
PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject)
/** \rst
Creates ``memoryview`` from ``buffer_info``.
``buffer_info`` must be created from ``buffer::request()``. Otherwise
throws an exception.
For creating a ``memoryview`` from objects that support the buffer protocol,
use ``memoryview(const object& obj)`` instead of this constructor.
\endrst */
explicit memoryview(const buffer_info &info) {
if (!info.view()) {
pybind11_fail("Prohibited to create memoryview without Py_buffer");
}
// Note: PyMemoryView_FromBuffer never increments obj reference.
m_ptr = (info.view()->obj) ? PyMemoryView_FromObject(info.view()->obj)
: PyMemoryView_FromBuffer(info.view());
if (!m_ptr) {
pybind11_fail("Unable to create memoryview from buffer descriptor");
}
}
/** \rst
Creates ``memoryview`` from static buffer.
This method is meant for providing a ``memoryview`` for a C/C++ buffer not
managed by Python. The caller is responsible for managing the lifetime
of ``ptr`` and ``format``, which MUST outlive the memoryview constructed
here.
See also: Python C API documentation for `PyMemoryView_FromBuffer`_.
.. _PyMemoryView_FromBuffer:
https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromBuffer
:param ptr: Pointer to the buffer.
:param itemsize: Byte size of an element.
:param format: Pointer to the null-terminated format string. For
homogeneous Buffers, this should be set to
``format_descriptor<T>::value``.
:param shape: Shape of the tensor (1 entry per dimension).
:param strides: Number of bytes between adjacent entries (1 entry per
dimension).
:param readonly: Flag to indicate if the underlying storage may be
written to.
\endrst */
static memoryview from_buffer(void *ptr,
ssize_t itemsize,
const char *format,
detail::any_container<ssize_t> shape,
detail::any_container<ssize_t> strides,
bool readonly = false);
static memoryview from_buffer(const void *ptr,
ssize_t itemsize,
const char *format,
detail::any_container<ssize_t> shape,
detail::any_container<ssize_t> strides) {
return memoryview::from_buffer(
const_cast<void *>(ptr), itemsize, format, std::move(shape), std::move(strides), true);
}
template <typename T>
static memoryview from_buffer(T *ptr,
detail::any_container<ssize_t> shape,
detail::any_container<ssize_t> strides,
bool readonly = false) {
return memoryview::from_buffer(reinterpret_cast<void *>(ptr),
sizeof(T),
format_descriptor<T>::value,
std::move(shape),
std::move(strides),
readonly);
}
template <typename T>
static memoryview from_buffer(const T *ptr,
detail::any_container<ssize_t> shape,
detail::any_container<ssize_t> strides) {
return memoryview::from_buffer(
const_cast<T *>(ptr), std::move(shape), std::move(strides), true);
}
/** \rst
Creates ``memoryview`` from static memory.
This method is meant for providing a ``memoryview`` for a C/C++ buffer not
managed by Python. The caller is responsible for managing the lifetime
of ``mem``, which MUST outlive the memoryview constructed here.
See also: Python C API documentation for `PyMemoryView_FromMemory`_.
.. _PyMemoryView_FromMemory:
https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromMemory
\endrst */
static memoryview from_memory(void *mem, ssize_t size, bool readonly = false) {
PyObject *ptr = PyMemoryView_FromMemory(
reinterpret_cast<char *>(mem), size, (readonly) ? PyBUF_READ : PyBUF_WRITE);
if (!ptr) {
pybind11_fail("Could not allocate memoryview object!");
}
return memoryview(object(ptr, stolen_t{}));
}
static memoryview from_memory(const void *mem, ssize_t size) {
return memoryview::from_memory(const_cast<void *>(mem), size, true);
}
#ifdef PYBIND11_HAS_STRING_VIEW
static memoryview from_memory(std::string_view mem) {
return from_memory(const_cast<char *>(mem.data()), static_cast<ssize_t>(mem.size()), true);
}
#endif
};
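// Illustrative usage (not part of the original header): exposing static storage
// to Python without copying; the caller guarantees `msg` outlives the memoryview.
//   static const char msg[] = "hello";
//   py::memoryview view = py::memoryview::from_memory(msg, sizeof(msg) - 1);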
/// @cond DUPLICATE
inline memoryview memoryview::from_buffer(void *ptr,
ssize_t itemsize,
const char *format,
detail::any_container<ssize_t> shape,
detail::any_container<ssize_t> strides,
bool readonly) {
size_t ndim = shape->size();
if (ndim != strides->size()) {
pybind11_fail("memoryview: shape length doesn't match strides length");
}
ssize_t size = ndim != 0u ? 1 : 0;
for (size_t i = 0; i < ndim; ++i) {
size *= (*shape)[i];
}
Py_buffer view;
view.buf = ptr;
view.obj = nullptr;
view.len = size * itemsize;
view.readonly = static_cast<int>(readonly);
view.itemsize = itemsize;
view.format = const_cast<char *>(format);
view.ndim = static_cast<int>(ndim);
view.shape = shape->data();
view.strides = strides->data();
view.suboffsets = nullptr;
view.internal = nullptr;
PyObject *obj = PyMemoryView_FromBuffer(&view);
if (!obj) {
throw error_already_set();
}
return memoryview(object(obj, stolen_t{}));
}
/// @endcond
/// @} pytypes
/// \addtogroup python_builtins
/// @{
/// Get the length of a Python object.
inline size_t len(handle h) {
ssize_t result = PyObject_Length(h.ptr());
if (result < 0) {
throw error_already_set();
}
return (size_t) result;
}
/// Get the length hint of a Python object.
/// Returns 0 when this cannot be determined.
inline size_t len_hint(handle h) {
ssize_t result = PyObject_LengthHint(h.ptr(), 0);
if (result < 0) {
// Sometimes a length can't be determined at all (e.g. generators),
// in which case simply return 0.
PyErr_Clear();
return 0;
}
return (size_t) result;
}
inline str repr(handle h) {
PyObject *str_value = PyObject_Repr(h.ptr());
if (!str_value) {
throw error_already_set();
}
return reinterpret_steal<str>(str_value);
}
inline iterator iter(handle obj) {
PyObject *result = PyObject_GetIter(obj.ptr());
if (!result) {
throw error_already_set();
}
return reinterpret_steal<iterator>(result);
}
/// @} python_builtins
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename D>
iterator object_api<D>::begin() const {
return iter(derived());
}
template <typename D>
iterator object_api<D>::end() const {
return iterator::sentinel();
}
template <typename D>
item_accessor object_api<D>::operator[](handle key) const {
return {derived(), reinterpret_borrow<object>(key)};
}
template <typename D>
item_accessor object_api<D>::operator[](object &&key) const {
return {derived(), std::move(key)};
}
template <typename D>
item_accessor object_api<D>::operator[](const char *key) const {
return {derived(), pybind11::str(key)};
}
template <typename D>
obj_attr_accessor object_api<D>::attr(handle key) const {
return {derived(), reinterpret_borrow<object>(key)};
}
template <typename D>
obj_attr_accessor object_api<D>::attr(object &&key) const {
return {derived(), std::move(key)};
}
template <typename D>
str_attr_accessor object_api<D>::attr(const char *key) const {
return {derived(), key};
}
template <typename D>
args_proxy object_api<D>::operator*() const {
return args_proxy(derived().ptr());
}
template <typename D>
template <typename T>
bool object_api<D>::contains(T &&item) const {
return attr("__contains__")(std::forward<T>(item)).template cast<bool>();
}
template <typename D>
pybind11::str object_api<D>::str() const {
return pybind11::str(derived());
}
template <typename D>
str_attr_accessor object_api<D>::doc() const {
return attr("__doc__");
}
template <typename D>
handle object_api<D>::get_type() const {
return type::handle_of(derived());
}
template <typename D>
bool object_api<D>::rich_compare(object_api const &other, int value) const {
int rv = PyObject_RichCompareBool(derived().ptr(), other.derived().ptr(), value);
if (rv == -1) {
throw error_already_set();
}
return rv == 1;
}
#define PYBIND11_MATH_OPERATOR_UNARY(op, fn) \
template <typename D> \
object object_api<D>::op() const { \
object result = reinterpret_steal<object>(fn(derived().ptr())); \
if (!result.ptr()) \
throw error_already_set(); \
return result; \
}
#define PYBIND11_MATH_OPERATOR_BINARY(op, fn) \
template <typename D> \
object object_api<D>::op(object_api const &other) const { \
object result = reinterpret_steal<object>(fn(derived().ptr(), other.derived().ptr())); \
if (!result.ptr()) \
throw error_already_set(); \
return result; \
}
#define PYBIND11_MATH_OPERATOR_BINARY_INPLACE(iop, fn) \
template <typename D> \
object object_api<D>::iop(object_api const &other) { \
object result = reinterpret_steal<object>(fn(derived().ptr(), other.derived().ptr())); \
if (!result.ptr()) \
throw error_already_set(); \
return result; \
}
PYBIND11_MATH_OPERATOR_UNARY(operator~, PyNumber_Invert)
PYBIND11_MATH_OPERATOR_UNARY(operator-, PyNumber_Negative)
PYBIND11_MATH_OPERATOR_BINARY(operator+, PyNumber_Add)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator+=, PyNumber_InPlaceAdd)
PYBIND11_MATH_OPERATOR_BINARY(operator-, PyNumber_Subtract)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator-=, PyNumber_InPlaceSubtract)
PYBIND11_MATH_OPERATOR_BINARY(operator*, PyNumber_Multiply)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator*=, PyNumber_InPlaceMultiply)
PYBIND11_MATH_OPERATOR_BINARY(operator/, PyNumber_TrueDivide)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator/=, PyNumber_InPlaceTrueDivide)
PYBIND11_MATH_OPERATOR_BINARY(operator|, PyNumber_Or)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator|=, PyNumber_InPlaceOr)
PYBIND11_MATH_OPERATOR_BINARY(operator&, PyNumber_And)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator&=, PyNumber_InPlaceAnd)
PYBIND11_MATH_OPERATOR_BINARY(operator^, PyNumber_Xor)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator^=, PyNumber_InPlaceXor)
PYBIND11_MATH_OPERATOR_BINARY(operator<<, PyNumber_Lshift)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator<<=, PyNumber_InPlaceLshift)
PYBIND11_MATH_OPERATOR_BINARY(operator>>, PyNumber_Rshift)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator>>=, PyNumber_InPlaceRshift)
#undef PYBIND11_MATH_OPERATOR_UNARY
#undef PYBIND11_MATH_OPERATOR_BINARY
#undef PYBIND11_MATH_OPERATOR_BINARY_INPLACE
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
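Taken together, the classes above are thin RAII wrappers over the corresponding CPython protocols. A minimal sketch of how they compose in binding code (illustrative only; the module name `example` is hypothetical):

```cpp
#include <pybind11/pybind11.h>

namespace py = pybind11;

PYBIND11_MODULE(example, m) {
    // Build Python containers directly through the wrapper types.
    m.def("make_row", []() {
        py::dict d;
        d["id"] = py::int_(42);
        d["score"] = py::float_(0.5);
        py::list tags;
        tags.append("fast");
        tags.append("small");
        d["tags"] = std::move(tags);
        return d;
    });
    // Iteration and casting mirror the corresponding Python protocols;
    // a non-numeric element makes h.cast<long long>() throw py::cast_error.
    m.def("sum_list", [](const py::list &l) {
        long long total = 0;
        for (py::handle h : l)
            total += h.cast<long long>();
        return total;
    });
}
```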
|
====================================================================================================================
SOURCE CODE FILE: stl.h
LINES: 1
SIZE: 15.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\stl.h
ENCODING: utf-8
```h
/*
pybind11/stl.h: Transparent conversion for STL data types
Copyright (c) 2016 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include "detail/common.h"
#include <deque>
#include <list>
#include <map>
#include <ostream>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <valarray>
// See `detail/common.h` for implementation of these guards.
#if defined(PYBIND11_HAS_OPTIONAL)
# include <optional>
#elif defined(PYBIND11_HAS_EXP_OPTIONAL)
# include <experimental/optional>
#endif
#if defined(PYBIND11_HAS_VARIANT)
# include <variant>
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// Extracts a const lvalue reference or rvalue reference for U based on the type of T (e.g. for
/// forwarding a container element). Typically used indirectly via forward_like(), below.
template <typename T, typename U>
using forwarded_type = conditional_t<std::is_lvalue_reference<T>::value,
remove_reference_t<U> &,
remove_reference_t<U> &&>;
/// Forwards a value U as rvalue or lvalue according to whether T is rvalue or lvalue; typically
/// used for forwarding a container's elements.
template <typename T, typename U>
constexpr forwarded_type<T, U> forward_like(U &&u) {
return std::forward<detail::forwarded_type<T, U>>(std::forward<U>(u));
}
// Checks if a container has an STL-style reserve method.
// This will only return true for a `reserve()` with a `void` return type.
template <typename C>
using has_reserve_method = std::is_same<decltype(std::declval<C>().reserve(0)), void>;
template <typename Type, typename Key>
struct set_caster {
using type = Type;
using key_conv = make_caster<Key>;
private:
template <typename T = Type, enable_if_t<has_reserve_method<T>::value, int> = 0>
void reserve_maybe(const anyset &s, Type *) {
value.reserve(s.size());
}
void reserve_maybe(const anyset &, void *) {}
public:
bool load(handle src, bool convert) {
if (!isinstance<anyset>(src)) {
return false;
}
auto s = reinterpret_borrow<anyset>(src);
value.clear();
reserve_maybe(s, &value);
for (auto entry : s) {
key_conv conv;
if (!conv.load(entry, convert)) {
return false;
}
value.insert(cast_op<Key &&>(std::move(conv)));
}
return true;
}
template <typename T>
static handle cast(T &&src, return_value_policy policy, handle parent) {
if (!std::is_lvalue_reference<T>::value) {
policy = return_value_policy_override<Key>::policy(policy);
}
pybind11::set s;
for (auto &&value : src) {
auto value_ = reinterpret_steal<object>(
key_conv::cast(detail::forward_like<T>(value), policy, parent));
if (!value_ || !s.add(std::move(value_))) {
return handle();
}
}
return s.release();
}
PYBIND11_TYPE_CASTER(type, const_name("set[") + key_conv::name + const_name("]"));
};
template <typename Type, typename Key, typename Value>
struct map_caster {
using key_conv = make_caster<Key>;
using value_conv = make_caster<Value>;
private:
template <typename T = Type, enable_if_t<has_reserve_method<T>::value, int> = 0>
void reserve_maybe(const dict &d, Type *) {
value.reserve(d.size());
}
void reserve_maybe(const dict &, void *) {}
public:
bool load(handle src, bool convert) {
if (!isinstance<dict>(src)) {
return false;
}
auto d = reinterpret_borrow<dict>(src);
value.clear();
reserve_maybe(d, &value);
for (auto it : d) {
key_conv kconv;
value_conv vconv;
if (!kconv.load(it.first.ptr(), convert) || !vconv.load(it.second.ptr(), convert)) {
return false;
}
value.emplace(cast_op<Key &&>(std::move(kconv)), cast_op<Value &&>(std::move(vconv)));
}
return true;
}
template <typename T>
static handle cast(T &&src, return_value_policy policy, handle parent) {
dict d;
return_value_policy policy_key = policy;
return_value_policy policy_value = policy;
if (!std::is_lvalue_reference<T>::value) {
policy_key = return_value_policy_override<Key>::policy(policy_key);
policy_value = return_value_policy_override<Value>::policy(policy_value);
}
for (auto &&kv : src) {
auto key = reinterpret_steal<object>(
key_conv::cast(detail::forward_like<T>(kv.first), policy_key, parent));
auto value = reinterpret_steal<object>(
value_conv::cast(detail::forward_like<T>(kv.second), policy_value, parent));
if (!key || !value) {
return handle();
}
d[std::move(key)] = std::move(value);
}
return d.release();
}
PYBIND11_TYPE_CASTER(Type,
const_name("dict[") + key_conv::name + const_name(", ") + value_conv::name
+ const_name("]"));
};
template <typename Type, typename Value>
struct list_caster {
using value_conv = make_caster<Value>;
bool load(handle src, bool convert) {
if (!isinstance<sequence>(src) || isinstance<bytes>(src) || isinstance<str>(src)) {
return false;
}
auto s = reinterpret_borrow<sequence>(src);
value.clear();
reserve_maybe(s, &value);
for (const auto &it : s) {
value_conv conv;
if (!conv.load(it, convert)) {
return false;
}
value.push_back(cast_op<Value &&>(std::move(conv)));
}
return true;
}
private:
template <typename T = Type, enable_if_t<has_reserve_method<T>::value, int> = 0>
void reserve_maybe(const sequence &s, Type *) {
value.reserve(s.size());
}
void reserve_maybe(const sequence &, void *) {}
public:
template <typename T>
static handle cast(T &&src, return_value_policy policy, handle parent) {
if (!std::is_lvalue_reference<T>::value) {
policy = return_value_policy_override<Value>::policy(policy);
}
list l(src.size());
ssize_t index = 0;
for (auto &&value : src) {
auto value_ = reinterpret_steal<object>(
value_conv::cast(detail::forward_like<T>(value), policy, parent));
if (!value_) {
return handle();
}
PyList_SET_ITEM(l.ptr(), index++, value_.release().ptr()); // steals a reference
}
return l.release();
}
PYBIND11_TYPE_CASTER(Type, const_name("list[") + value_conv::name + const_name("]"));
};
template <typename Type, typename Alloc>
struct type_caster<std::vector<Type, Alloc>> : list_caster<std::vector<Type, Alloc>, Type> {};
template <typename Type, typename Alloc>
struct type_caster<std::deque<Type, Alloc>> : list_caster<std::deque<Type, Alloc>, Type> {};
template <typename Type, typename Alloc>
struct type_caster<std::list<Type, Alloc>> : list_caster<std::list<Type, Alloc>, Type> {};
template <typename ArrayType, typename Value, bool Resizable, size_t Size = 0>
struct array_caster {
using value_conv = make_caster<Value>;
private:
template <bool R = Resizable>
bool require_size(enable_if_t<R, size_t> size) {
if (value.size() != size) {
value.resize(size);
}
return true;
}
template <bool R = Resizable>
bool require_size(enable_if_t<!R, size_t> size) {
return size == Size;
}
public:
bool load(handle src, bool convert) {
if (!isinstance<sequence>(src)) {
return false;
}
auto l = reinterpret_borrow<sequence>(src);
if (!require_size(l.size())) {
return false;
}
size_t ctr = 0;
for (const auto &it : l) {
value_conv conv;
if (!conv.load(it, convert)) {
return false;
}
value[ctr++] = cast_op<Value &&>(std::move(conv));
}
return true;
}
template <typename T>
static handle cast(T &&src, return_value_policy policy, handle parent) {
list l(src.size());
ssize_t index = 0;
for (auto &&value : src) {
auto value_ = reinterpret_steal<object>(
value_conv::cast(detail::forward_like<T>(value), policy, parent));
if (!value_) {
return handle();
}
PyList_SET_ITEM(l.ptr(), index++, value_.release().ptr()); // steals a reference
}
return l.release();
}
PYBIND11_TYPE_CASTER(ArrayType,
const_name<Resizable>(const_name(""), const_name("Annotated["))
+ const_name("list[") + value_conv::name + const_name("]")
+ const_name<Resizable>(const_name(""),
const_name(", FixedSize(")
+ const_name<Size>() + const_name(")]")));
};
template <typename Type, size_t Size>
struct type_caster<std::array<Type, Size>>
: array_caster<std::array<Type, Size>, Type, false, Size> {};
template <typename Type>
struct type_caster<std::valarray<Type>> : array_caster<std::valarray<Type>, Type, true> {};
template <typename Key, typename Compare, typename Alloc>
struct type_caster<std::set<Key, Compare, Alloc>>
: set_caster<std::set<Key, Compare, Alloc>, Key> {};
template <typename Key, typename Hash, typename Equal, typename Alloc>
struct type_caster<std::unordered_set<Key, Hash, Equal, Alloc>>
: set_caster<std::unordered_set<Key, Hash, Equal, Alloc>, Key> {};
template <typename Key, typename Value, typename Compare, typename Alloc>
struct type_caster<std::map<Key, Value, Compare, Alloc>>
: map_caster<std::map<Key, Value, Compare, Alloc>, Key, Value> {};
template <typename Key, typename Value, typename Hash, typename Equal, typename Alloc>
struct type_caster<std::unordered_map<Key, Value, Hash, Equal, Alloc>>
: map_caster<std::unordered_map<Key, Value, Hash, Equal, Alloc>, Key, Value> {};
// This type caster is intended to be used for std::optional and std::experimental::optional
template <typename Type, typename Value = typename Type::value_type>
struct optional_caster {
using value_conv = make_caster<Value>;
template <typename T>
static handle cast(T &&src, return_value_policy policy, handle parent) {
if (!src) {
return none().release();
}
if (!std::is_lvalue_reference<T>::value) {
policy = return_value_policy_override<Value>::policy(policy);
}
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
return value_conv::cast(*std::forward<T>(src), policy, parent);
}
bool load(handle src, bool convert) {
if (!src) {
return false;
}
if (src.is_none()) {
return true; // default-constructed value is already empty
}
value_conv inner_caster;
if (!inner_caster.load(src, convert)) {
return false;
}
value.emplace(cast_op<Value &&>(std::move(inner_caster)));
return true;
}
PYBIND11_TYPE_CASTER(Type, const_name("Optional[") + value_conv::name + const_name("]"));
};
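// Illustrative behaviour (not part of the original header): with this caster,
//   py::cast(std::optional<int>{})           // -> Python None
//   py::none().cast<std::optional<int>>()    // -> std::nullopt
//   py::int_(7).cast<std::optional<int>>()   // -> std::optional<int>{7}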
#if defined(PYBIND11_HAS_OPTIONAL)
template <typename T>
struct type_caster<std::optional<T>> : public optional_caster<std::optional<T>> {};
template <>
struct type_caster<std::nullopt_t> : public void_caster<std::nullopt_t> {};
#endif
#if defined(PYBIND11_HAS_EXP_OPTIONAL)
template <typename T>
struct type_caster<std::experimental::optional<T>>
: public optional_caster<std::experimental::optional<T>> {};
template <>
struct type_caster<std::experimental::nullopt_t>
: public void_caster<std::experimental::nullopt_t> {};
#endif
/// Visit a variant and cast any found type to Python
struct variant_caster_visitor {
return_value_policy policy;
handle parent;
using result_type = handle; // required by boost::variant in C++11
template <typename T>
result_type operator()(T &&src) const {
return make_caster<T>::cast(std::forward<T>(src), policy, parent);
}
};
/// Helper class which abstracts away variant's `visit` function. `std::variant` and similar
/// `namespace::variant` types which provide a `namespace::visit()` function are handled here
/// automatically using argument-dependent lookup. Users can provide specializations for other
/// variant-like classes, e.g. `boost::variant` and `boost::apply_visitor`.
template <template <typename...> class Variant>
struct visit_helper {
template <typename... Args>
static auto call(Args &&...args) -> decltype(visit(std::forward<Args>(args)...)) {
return visit(std::forward<Args>(args)...);
}
};
/// Generic variant caster
template <typename Variant>
struct variant_caster;
template <template <typename...> class V, typename... Ts>
struct variant_caster<V<Ts...>> {
static_assert(sizeof...(Ts) > 0, "Variant must consist of at least one alternative.");
template <typename U, typename... Us>
bool load_alternative(handle src, bool convert, type_list<U, Us...>) {
auto caster = make_caster<U>();
if (caster.load(src, convert)) {
value = cast_op<U>(std::move(caster));
return true;
}
return load_alternative(src, convert, type_list<Us...>{});
}
bool load_alternative(handle, bool, type_list<>) { return false; }
bool load(handle src, bool convert) {
// Do a first pass without conversions to improve constructor resolution.
// E.g. `py::int_(1).cast<variant<double, int>>()` needs to fill the `int`
// slot of the variant. Without two-pass loading `double` would be filled
// because it appears first and a conversion is possible.
if (convert && load_alternative(src, false, type_list<Ts...>{})) {
return true;
}
return load_alternative(src, convert, type_list<Ts...>{});
}
template <typename Variant>
static handle cast(Variant &&src, return_value_policy policy, handle parent) {
return visit_helper<V>::call(variant_caster_visitor{policy, parent},
std::forward<Variant>(src));
}
using Type = V<Ts...>;
PYBIND11_TYPE_CASTER(Type,
const_name("Union[")
+ ::pybind11::detail::concat(make_caster<Ts>::name...)
+ const_name("]"));
};
#if defined(PYBIND11_HAS_VARIANT)
template <typename... Ts>
struct type_caster<std::variant<Ts...>> : variant_caster<std::variant<Ts...>> {};
template <>
struct type_caster<std::monostate> : public void_caster<std::monostate> {};
#endif
PYBIND11_NAMESPACE_END(detail)
inline std::ostream &operator<<(std::ostream &os, const handle &obj) {
#ifdef PYBIND11_HAS_STRING_VIEW
os << str(obj).cast<std::string_view>();
#else
os << (std::string) str(obj);
#endif
return os;
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
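Because including this header registers the casters above, STL containers convert transparently (by copy) at call boundaries. A minimal sketch (the module name `example` is hypothetical):

```cpp
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>  // registers the casters defined in this file

#include <map>
#include <optional>
#include <string>
#include <vector>

namespace py = pybind11;

PYBIND11_MODULE(example, m) {
    // A Python list of ints arrives as a freshly copied std::vector<int>.
    m.def("total", [](const std::vector<int> &v) {
        int s = 0;
        for (int x : v)
            s += x;
        return s;
    });
    // std::map round-trips as a Python dict; std::optional maps None <-> nullopt.
    m.def("lookup",
          [](const std::map<std::string, int> &table,
             const std::string &key) -> std::optional<int> {
              auto it = table.find(key);
              if (it == table.end())
                  return std::nullopt;
              return it->second;
          });
}
```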
|
=========================================================================================================================
SOURCE CODE FILE: stl_bind.h
LINES: 1
SIZE: 28.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\stl_bind.h
ENCODING: utf-8
```h
/*
pybind11/stl_bind.h: Binding generators for STL data types
Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include "detail/type_caster_base.h"
#include "cast.h"
#include "operators.h"
#include <algorithm>
#include <sstream>
#include <type_traits>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/* SFINAE helper class used by 'is_comparable' */
template <typename T>
struct container_traits {
template <typename T2>
static std::true_type
test_comparable(decltype(std::declval<const T2 &>() == std::declval<const T2 &>()) *);
template <typename T2>
static std::false_type test_comparable(...);
template <typename T2>
static std::true_type test_value(typename T2::value_type *);
template <typename T2>
static std::false_type test_value(...);
template <typename T2>
static std::true_type test_pair(typename T2::first_type *, typename T2::second_type *);
template <typename T2>
static std::false_type test_pair(...);
static constexpr const bool is_comparable
= std::is_same<std::true_type, decltype(test_comparable<T>(nullptr))>::value;
static constexpr const bool is_pair
= std::is_same<std::true_type, decltype(test_pair<T>(nullptr, nullptr))>::value;
static constexpr const bool is_vector
= std::is_same<std::true_type, decltype(test_value<T>(nullptr))>::value;
static constexpr const bool is_element = !is_pair && !is_vector;
};
/* Default: is_comparable -> std::false_type */
template <typename T, typename SFINAE = void>
struct is_comparable : std::false_type {};
/* For non-map data structures, check whether operator== can be instantiated */
template <typename T>
struct is_comparable<
T,
enable_if_t<container_traits<T>::is_element && container_traits<T>::is_comparable>>
: std::true_type {};
/* For a vector/map data structure, recursively check the value type
(which is std::pair for maps) */
template <typename T>
struct is_comparable<T, enable_if_t<container_traits<T>::is_vector>>
: is_comparable<typename recursive_container_traits<T>::type_to_check_recursively> {};
template <>
struct is_comparable<recursive_bottom> : std::true_type {};
/* For pairs, recursively check the two data types */
template <typename T>
struct is_comparable<T, enable_if_t<container_traits<T>::is_pair>> {
static constexpr const bool value = is_comparable<typename T::first_type>::value
&& is_comparable<typename T::second_type>::value;
};
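// Illustrative (not part of the original header): the recursion bottoms out at
// element types, so for instance
//   is_comparable<std::vector<int>>::value                    // true: int has ==
//   is_comparable<std::vector<std::function<void()>>>::value  // false: no ==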
/* Fallback functions */
template <typename, typename, typename... Args>
void vector_if_copy_constructible(const Args &...) {}
template <typename, typename, typename... Args>
void vector_if_equal_operator(const Args &...) {}
template <typename, typename, typename... Args>
void vector_if_insertion_operator(const Args &...) {}
template <typename, typename, typename... Args>
void vector_modifiers(const Args &...) {}
template <typename Vector, typename Class_>
void vector_if_copy_constructible(enable_if_t<is_copy_constructible<Vector>::value, Class_> &cl) {
cl.def(init<const Vector &>(), "Copy constructor");
}
template <typename Vector, typename Class_>
void vector_if_equal_operator(enable_if_t<is_comparable<Vector>::value, Class_> &cl) {
using T = typename Vector::value_type;
cl.def(self == self);
cl.def(self != self);
cl.def(
"count",
[](const Vector &v, const T &x) { return std::count(v.begin(), v.end(), x); },
arg("x"),
"Return the number of times ``x`` appears in the list");
cl.def(
"remove",
[](Vector &v, const T &x) {
auto p = std::find(v.begin(), v.end(), x);
if (p != v.end()) {
v.erase(p);
} else {
throw value_error();
}
},
arg("x"),
"Remove the first item from the list whose value is x. "
"It is an error if there is no such item.");
cl.def(
"__contains__",
[](const Vector &v, const T &x) { return std::find(v.begin(), v.end(), x) != v.end(); },
arg("x"),
"Return true the container contains ``x``");
}
// Vector modifiers -- requires a copyable vector_type:
// (Technically, some of these (pop and __delitem__) don't actually require copyability, but it
// seems silly to allow deletion but not insertion, so include them here too.)
template <typename Vector, typename Class_>
void vector_modifiers(
enable_if_t<is_copy_constructible<typename Vector::value_type>::value, Class_> &cl) {
using T = typename Vector::value_type;
using SizeType = typename Vector::size_type;
using DiffType = typename Vector::difference_type;
auto wrap_i = [](DiffType i, SizeType n) {
if (i < 0) {
i += n;
}
if (i < 0 || (SizeType) i >= n) {
throw index_error();
}
return i;
};
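// e.g. (illustrative): wrap_i(-1, 4) == 3, while wrap_i(4, 4) throws index_error.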
cl.def(
"append",
[](Vector &v, const T &value) { v.push_back(value); },
arg("x"),
"Add an item to the end of the list");
cl.def(init([](const iterable &it) {
auto v = std::unique_ptr<Vector>(new Vector());
v->reserve(len_hint(it));
for (handle h : it) {
v->push_back(h.cast<T>());
}
return v.release();
}));
cl.def("clear", [](Vector &v) { v.clear(); }, "Clear the contents");
cl.def(
"extend",
[](Vector &v, const Vector &src) { v.insert(v.end(), src.begin(), src.end()); },
arg("L"),
"Extend the list by appending all the items in the given list");
cl.def(
"extend",
[](Vector &v, const iterable &it) {
const size_t old_size = v.size();
v.reserve(old_size + len_hint(it));
try {
for (handle h : it) {
v.push_back(h.cast<T>());
}
} catch (const cast_error &) {
v.erase(v.begin() + static_cast<typename Vector::difference_type>(old_size),
v.end());
try {
v.shrink_to_fit();
} catch (const std::exception &) { // NOLINT(bugprone-empty-catch)
// Do nothing
}
throw;
}
},
arg("L"),
"Extend the list by appending all the items in the given list");
cl.def(
"insert",
[](Vector &v, DiffType i, const T &x) {
// Can't use wrap_i; i == v.size() is OK
if (i < 0) {
i += v.size();
}
if (i < 0 || (SizeType) i > v.size()) {
throw index_error();
}
v.insert(v.begin() + i, x);
},
arg("i"),
arg("x"),
"Insert an item at a given position.");
cl.def(
"pop",
[](Vector &v) {
if (v.empty()) {
throw index_error();
}
T t = std::move(v.back());
v.pop_back();
return t;
},
"Remove and return the last item");
cl.def(
"pop",
[wrap_i](Vector &v, DiffType i) {
i = wrap_i(i, v.size());
T t = std::move(v[(SizeType) i]);
v.erase(std::next(v.begin(), i));
return t;
},
arg("i"),
"Remove and return the item at index ``i``");
cl.def("__setitem__", [wrap_i](Vector &v, DiffType i, const T &t) {
i = wrap_i(i, v.size());
v[(SizeType) i] = t;
});
/// Slicing protocol
cl.def(
"__getitem__",
[](const Vector &v, const slice &slice) -> Vector * {
size_t start = 0, stop = 0, step = 0, slicelength = 0;
if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) {
throw error_already_set();
}
auto *seq = new Vector();
seq->reserve((size_t) slicelength);
for (size_t i = 0; i < slicelength; ++i) {
seq->push_back(v[start]);
start += step;
}
return seq;
},
arg("s"),
"Retrieve list elements using a slice object");
cl.def(
"__setitem__",
[](Vector &v, const slice &slice, const Vector &value) {
size_t start = 0, stop = 0, step = 0, slicelength = 0;
if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) {
throw error_already_set();
}
if (slicelength != value.size()) {
throw std::runtime_error(
"Left and right hand size of slice assignment have different sizes!");
}
for (size_t i = 0; i < slicelength; ++i) {
v[start] = value[i];
start += step;
}
},
"Assign list elements using a slice object");
cl.def(
"__delitem__",
[wrap_i](Vector &v, DiffType i) {
i = wrap_i(i, v.size());
v.erase(v.begin() + i);
},
"Delete the list elements at index ``i``");
cl.def(
"__delitem__",
[](Vector &v, const slice &slice) {
size_t start = 0, stop = 0, step = 0, slicelength = 0;
if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) {
throw error_already_set();
}
// Note: the `&& false` leaves this range-erase fast path permanently disabled;
// the element-by-element loop below handles every step size, including 1.
if (step == 1 && false) {
v.erase(v.begin() + (DiffType) start, v.begin() + DiffType(start + slicelength));
} else {
for (size_t i = 0; i < slicelength; ++i) {
v.erase(v.begin() + DiffType(start));
start += step - 1;
}
}
},
"Delete list elements using a slice object");
}
// If the type has an operator[] that doesn't return a reference (most notably std::vector<bool>),
// we have to access by copying; otherwise we return by reference.
template <typename Vector>
using vector_needs_copy
= negation<std::is_same<decltype(std::declval<Vector>()[typename Vector::size_type()]),
typename Vector::value_type &>>;
// The usual case: access and iterate by reference
template <typename Vector, typename Class_>
void vector_accessor(enable_if_t<!vector_needs_copy<Vector>::value, Class_> &cl) {
using T = typename Vector::value_type;
using SizeType = typename Vector::size_type;
using DiffType = typename Vector::difference_type;
using ItType = typename Vector::iterator;
auto wrap_i = [](DiffType i, SizeType n) {
if (i < 0) {
i += n;
}
if (i < 0 || (SizeType) i >= n) {
throw index_error();
}
return i;
};
cl.def(
"__getitem__",
[wrap_i](Vector &v, DiffType i) -> T & {
i = wrap_i(i, v.size());
return v[(SizeType) i];
},
return_value_policy::reference_internal // ref + keepalive
);
cl.def(
"__iter__",
[](Vector &v) {
return make_iterator<return_value_policy::reference_internal, ItType, ItType, T &>(
v.begin(), v.end());
},
keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */
);
}
// The case for special objects, like std::vector<bool>, that have to be returned-by-copy:
template <typename Vector, typename Class_>
void vector_accessor(enable_if_t<vector_needs_copy<Vector>::value, Class_> &cl) {
using T = typename Vector::value_type;
using SizeType = typename Vector::size_type;
using DiffType = typename Vector::difference_type;
using ItType = typename Vector::iterator;
cl.def("__getitem__", [](const Vector &v, DiffType i) -> T {
if (i < 0) {
i += v.size();
if (i < 0) {
throw index_error();
}
}
auto i_st = static_cast<SizeType>(i);
if (i_st >= v.size()) {
throw index_error();
}
return v[i_st];
});
cl.def(
"__iter__",
[](Vector &v) {
return make_iterator<return_value_policy::copy, ItType, ItType, T>(v.begin(), v.end());
},
keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */
);
}
template <typename Vector, typename Class_>
auto vector_if_insertion_operator(Class_ &cl, std::string const &name)
-> decltype(std::declval<std::ostream &>() << std::declval<typename Vector::value_type>(),
void()) {
using size_type = typename Vector::size_type;
cl.def(
"__repr__",
[name](Vector &v) {
std::ostringstream s;
s << name << '[';
for (size_type i = 0; i < v.size(); ++i) {
s << v[i];
if (i != v.size() - 1) {
s << ", ";
}
}
s << ']';
return s.str();
},
"Return the canonical string representation of this list.");
}
// Provide the buffer interface for vectors if we have data() and we have a format for it
// GCC seems to have "void std::vector<bool>::data()" - doing SFINAE on the existence of data()
// is insufficient; we need to check that it returns an appropriate pointer.
template <typename Vector, typename = void>
struct vector_has_data_and_format : std::false_type {};
template <typename Vector>
struct vector_has_data_and_format<
Vector,
enable_if_t<std::is_same<decltype(format_descriptor<typename Vector::value_type>::format(),
std::declval<Vector>().data()),
typename Vector::value_type *>::value>> : std::true_type {};
// [workaround(intel)] Separate function required here
// Workaround as the Intel compiler does not compile the enable_if_t part below
// (tested with icc (ICC) 2021.1 Beta 20200827)
template <typename... Args>
constexpr bool args_any_are_buffer() {
return detail::any_of<std::is_same<Args, buffer_protocol>...>::value;
}
// [workaround(intel)] Separate function required here
// [workaround(msvc)] Can't use constexpr bool in return type
// Add the buffer interface to a vector
template <typename Vector, typename Class_, typename... Args>
void vector_buffer_impl(Class_ &cl, std::true_type) {
using T = typename Vector::value_type;
static_assert(vector_has_data_and_format<Vector>::value,
"There is not an appropriate format descriptor for this vector");
// numpy.h declares this for arbitrary types, but it may raise an exception and crash hard
// at runtime if PYBIND11_NUMPY_DTYPE hasn't been called, so check here
format_descriptor<T>::format();
cl.def_buffer([](Vector &v) -> buffer_info {
return buffer_info(v.data(),
static_cast<ssize_t>(sizeof(T)),
format_descriptor<T>::format(),
1,
{v.size()},
{sizeof(T)});
});
cl.def(init([](const buffer &buf) {
auto info = buf.request();
if (info.ndim != 1 || info.strides[0] % static_cast<ssize_t>(sizeof(T))) {
throw type_error("Only valid 1D buffers can be copied to a vector");
}
if (!detail::compare_buffer_info<T>::compare(info)
|| (ssize_t) sizeof(T) != info.itemsize) {
throw type_error("Format mismatch (Python: " + info.format
+ " C++: " + format_descriptor<T>::format() + ")");
}
T *p = static_cast<T *>(info.ptr);
ssize_t step = info.strides[0] / static_cast<ssize_t>(sizeof(T));
T *end = p + info.shape[0] * step;
if (step == 1) {
return Vector(p, end);
}
Vector vec;
vec.reserve((size_t) info.shape[0]);
for (; p != end; p += step) {
vec.push_back(*p);
}
return vec;
}));
return;
}
template <typename Vector, typename Class_, typename... Args>
void vector_buffer_impl(Class_ &, std::false_type) {}
template <typename Vector, typename Class_, typename... Args>
void vector_buffer(Class_ &cl) {
vector_buffer_impl<Vector, Class_, Args...>(
cl, detail::any_of<std::is_same<Args, buffer_protocol>...>{});
}
PYBIND11_NAMESPACE_END(detail)
//
// std::vector
//
template <typename Vector, typename holder_type = std::unique_ptr<Vector>, typename... Args>
class_<Vector, holder_type> bind_vector(handle scope, std::string const &name, Args &&...args) {
using Class_ = class_<Vector, holder_type>;
// If the value_type is unregistered (e.g. a converting type) or is itself registered
// module-local then make the vector binding module-local as well:
using vtype = typename Vector::value_type;
auto *vtype_info = detail::get_type_info(typeid(vtype));
bool local = !vtype_info || vtype_info->module_local;
Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward<Args>(args)...);
// Declare the buffer interface if a buffer_protocol() is passed in
detail::vector_buffer<Vector, Class_, Args...>(cl);
cl.def(init<>());
// Register copy constructor (if possible)
detail::vector_if_copy_constructible<Vector, Class_>(cl);
// Register comparison-related operators and functions (if possible)
detail::vector_if_equal_operator<Vector, Class_>(cl);
// Register stream insertion operator (if possible)
detail::vector_if_insertion_operator<Vector, Class_>(cl, name);
// Modifiers require copyable vector value type
detail::vector_modifiers<Vector, Class_>(cl);
// Accessor and iterator; return by value when operator[] cannot return a reference
// (e.g. std::vector<bool>), otherwise return by ref + keep-alive
detail::vector_accessor<Vector, Class_>(cl);
cl.def(
"__bool__",
[](const Vector &v) -> bool { return !v.empty(); },
"Check whether the list is nonempty");
cl.def("__len__", [](const Vector &vec) { return vec.size(); });
#if 0
// C++ style functions deprecated, leaving it here as an example
cl.def(init<size_type>());
cl.def("resize",
(void (Vector::*) (size_type count)) & Vector::resize,
"changes the number of elements stored");
cl.def("erase",
[](Vector &v, SizeType i) {
if (i >= v.size())
throw index_error();
v.erase(v.begin() + i);
}, "erases element at index ``i``");
cl.def("empty", &Vector::empty, "checks whether the container is empty");
cl.def("size", &Vector::size, "returns the number of elements");
cl.def("push_back", (void (Vector::*)(const T&)) &Vector::push_back, "adds an element to the end");
cl.def("pop_back", &Vector::pop_back, "removes the last element");
cl.def("max_size", &Vector::max_size, "returns the maximum possible number of elements");
cl.def("reserve", &Vector::reserve, "reserves storage");
cl.def("capacity", &Vector::capacity, "returns the number of elements that can be held in currently allocated storage");
cl.def("shrink_to_fit", &Vector::shrink_to_fit, "reduces memory usage by freeing unused memory");
cl.def("clear", &Vector::clear, "clears the contents");
cl.def("swap", &Vector::swap, "swaps the contents");
cl.def("front", [](Vector &v) {
if (v.size()) return v.front();
else throw index_error();
}, "access the first element");
cl.def("back", [](Vector &v) {
if (v.size()) return v.back();
else throw index_error();
}, "access the last element ");
#endif
return cl;
}
//
// std::map, std::unordered_map
//
PYBIND11_NAMESPACE_BEGIN(detail)
/* Fallback functions */
template <typename, typename, typename... Args>
void map_if_insertion_operator(const Args &...) {}
template <typename, typename, typename... Args>
void map_assignment(const Args &...) {}
// Map assignment when copy-assignable: just copy the value
template <typename Map, typename Class_>
void map_assignment(
enable_if_t<is_copy_assignable<typename Map::mapped_type>::value, Class_> &cl) {
using KeyType = typename Map::key_type;
using MappedType = typename Map::mapped_type;
cl.def("__setitem__", [](Map &m, const KeyType &k, const MappedType &v) {
auto it = m.find(k);
if (it != m.end()) {
it->second = v;
} else {
m.emplace(k, v);
}
});
}
// Not copy-assignable, but still copy-constructible: we can update the value by erasing and
// reinserting
template <typename Map, typename Class_>
void map_assignment(enable_if_t<!is_copy_assignable<typename Map::mapped_type>::value
&& is_copy_constructible<typename Map::mapped_type>::value,
Class_> &cl) {
using KeyType = typename Map::key_type;
using MappedType = typename Map::mapped_type;
cl.def("__setitem__", [](Map &m, const KeyType &k, const MappedType &v) {
// We can't use m[k] = v; because the value type might not be default constructible
auto r = m.emplace(k, v);
if (!r.second) {
// value type is not copy assignable so the only way to insert it is to erase it
// first...
m.erase(r.first);
m.emplace(k, v);
}
});
}
template <typename Map, typename Class_>
auto map_if_insertion_operator(Class_ &cl, std::string const &name)
-> decltype(std::declval<std::ostream &>() << std::declval<typename Map::key_type>()
<< std::declval<typename Map::mapped_type>(),
void()) {
cl.def(
"__repr__",
[name](Map &m) {
std::ostringstream s;
s << name << '{';
bool f = false;
for (auto const &kv : m) {
if (f) {
s << ", ";
}
s << kv.first << ": " << kv.second;
f = true;
}
s << '}';
return s.str();
},
"Return the canonical string representation of this map.");
}
struct keys_view {
virtual size_t len() = 0;
virtual iterator iter() = 0;
virtual bool contains(const handle &k) = 0;
virtual ~keys_view() = default;
};
struct values_view {
virtual size_t len() = 0;
virtual iterator iter() = 0;
virtual ~values_view() = default;
};
struct items_view {
virtual size_t len() = 0;
virtual iterator iter() = 0;
virtual ~items_view() = default;
};
template <typename Map>
struct KeysViewImpl : public detail::keys_view {
explicit KeysViewImpl(Map &map) : map(map) {}
size_t len() override { return map.size(); }
iterator iter() override { return make_key_iterator(map.begin(), map.end()); }
bool contains(const handle &k) override {
try {
return map.find(k.template cast<typename Map::key_type>()) != map.end();
} catch (const cast_error &) {
return false;
}
}
Map &map;
};
template <typename Map>
struct ValuesViewImpl : public detail::values_view {
explicit ValuesViewImpl(Map &map) : map(map) {}
size_t len() override { return map.size(); }
iterator iter() override { return make_value_iterator(map.begin(), map.end()); }
Map &map;
};
template <typename Map>
struct ItemsViewImpl : public detail::items_view {
explicit ItemsViewImpl(Map &map) : map(map) {}
size_t len() override { return map.size(); }
iterator iter() override { return make_iterator(map.begin(), map.end()); }
Map &map;
};
PYBIND11_NAMESPACE_END(detail)
template <typename Map, typename holder_type = std::unique_ptr<Map>, typename... Args>
class_<Map, holder_type> bind_map(handle scope, const std::string &name, Args &&...args) {
using KeyType = typename Map::key_type;
using MappedType = typename Map::mapped_type;
using KeysView = detail::keys_view;
using ValuesView = detail::values_view;
using ItemsView = detail::items_view;
using Class_ = class_<Map, holder_type>;
// If either type is a non-module-local bound type then make the map binding non-local as well;
// otherwise (e.g. both types are either module-local or converting) the map will be
// module-local.
auto *tinfo = detail::get_type_info(typeid(MappedType));
bool local = !tinfo || tinfo->module_local;
if (local) {
tinfo = detail::get_type_info(typeid(KeyType));
local = !tinfo || tinfo->module_local;
}
Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward<Args>(args)...);
// Wrap KeysView if it wasn't already wrapped
if (!detail::get_type_info(typeid(KeysView))) {
class_<KeysView> keys_view(scope, "KeysView", pybind11::module_local(local));
keys_view.def("__len__", &KeysView::len);
keys_view.def("__iter__",
&KeysView::iter,
keep_alive<0, 1>() /* Essential: keep view alive while iterator exists */
);
keys_view.def("__contains__", &KeysView::contains);
}
// Similarly for ValuesView:
if (!detail::get_type_info(typeid(ValuesView))) {
class_<ValuesView> values_view(scope, "ValuesView", pybind11::module_local(local));
values_view.def("__len__", &ValuesView::len);
values_view.def("__iter__",
&ValuesView::iter,
keep_alive<0, 1>() /* Essential: keep view alive while iterator exists */
);
}
// Similarly for ItemsView:
if (!detail::get_type_info(typeid(ItemsView))) {
class_<ItemsView> items_view(scope, "ItemsView", pybind11::module_local(local));
items_view.def("__len__", &ItemsView::len);
items_view.def("__iter__",
&ItemsView::iter,
keep_alive<0, 1>() /* Essential: keep view alive while iterator exists */
);
}
cl.def(init<>());
// Register stream insertion operator (if possible)
detail::map_if_insertion_operator<Map, Class_>(cl, name);
cl.def(
"__bool__",
[](const Map &m) -> bool { return !m.empty(); },
"Check whether the map is nonempty");
cl.def(
"__iter__",
[](Map &m) { return make_key_iterator(m.begin(), m.end()); },
keep_alive<0, 1>() /* Essential: keep map alive while iterator exists */
);
cl.def(
"keys",
[](Map &m) { return std::unique_ptr<KeysView>(new detail::KeysViewImpl<Map>(m)); },
keep_alive<0, 1>() /* Essential: keep map alive while view exists */
);
cl.def(
"values",
[](Map &m) { return std::unique_ptr<ValuesView>(new detail::ValuesViewImpl<Map>(m)); },
keep_alive<0, 1>() /* Essential: keep map alive while view exists */
);
cl.def(
"items",
[](Map &m) { return std::unique_ptr<ItemsView>(new detail::ItemsViewImpl<Map>(m)); },
keep_alive<0, 1>() /* Essential: keep map alive while view exists */
);
cl.def(
"__getitem__",
[](Map &m, const KeyType &k) -> MappedType & {
auto it = m.find(k);
if (it == m.end()) {
throw key_error();
}
return it->second;
},
return_value_policy::reference_internal // ref + keepalive
);
cl.def("__contains__", [](Map &m, const KeyType &k) -> bool {
auto it = m.find(k);
if (it == m.end()) {
return false;
}
return true;
});
// Fallback for when the object is not of the key type
cl.def("__contains__", [](Map &, const object &) -> bool { return false; });
// Assignment provided only if the type is copyable
detail::map_assignment<Map, Class_>(cl);
cl.def("__delitem__", [](Map &m, const KeyType &k) {
auto it = m.find(k);
if (it == m.end()) {
throw key_error();
}
m.erase(it);
});
// Always use a lambda in case of `using` declaration
cl.def("__len__", [](const Map &m) { return m.size(); });
return cl;
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
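The `bind_map` helper above is typically paired with `PYBIND11_MAKE_OPAQUE` so the bound map is passed by reference rather than converted to a Python `dict` on every crossing. A minimal sketch (the module name `example` is illustrative):

```cpp
#include <pybind11/pybind11.h>
#include <pybind11/stl_bind.h>
#include <map>
#include <string>

// Prevent the default copy-converting caster from handling this type.
PYBIND11_MAKE_OPAQUE(std::map<std::string, int>);

PYBIND11_MODULE(example, m) {
    // Exposes __getitem__/__setitem__/__delitem__/__len__/keys/values/items.
    pybind11::bind_map<std::map<std::string, int>>(m, "MapStringInt");
}
```

From Python the resulting class then behaves much like a `dict`: `d = example.MapStringInt(); d["a"] = 1; list(d.keys())`.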
|
=========================================================================================================================================
SOURCE CODE FILE: type_caster_pyobject_ptr.h
LINES: 1
SIZE: 1.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\type_caster_pyobject_ptr.h
ENCODING: utf-8
```h
// Copyright (c) 2023 The pybind Community.
#pragma once
#include "detail/common.h"
#include "detail/descr.h"
#include "cast.h"
#include "pytypes.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
template <>
class type_caster<PyObject> {
public:
static constexpr auto name = const_name("object"); // See discussion under PR #4601.
// This overload is purely to guard against accidents.
template <typename T,
detail::enable_if_t<!is_same_ignoring_cvref<T, PyObject *>::value, int> = 0>
static handle cast(T &&, return_value_policy, handle /*parent*/) {
static_assert(is_same_ignoring_cvref<T, PyObject *>::value,
"Invalid C++ type T for to-Python conversion (type_caster<PyObject>).");
return nullptr; // Unreachable.
}
static handle cast(PyObject *src, return_value_policy policy, handle /*parent*/) {
if (src == nullptr) {
throw error_already_set();
}
if (PyErr_Occurred()) {
raise_from(PyExc_SystemError, "src != nullptr but PyErr_Occurred()");
throw error_already_set();
}
if (policy == return_value_policy::take_ownership) {
return src;
}
if (policy == return_value_policy::reference
|| policy == return_value_policy::automatic_reference) {
return handle(src).inc_ref();
}
pybind11_fail("type_caster<PyObject>::cast(): unsupported return_value_policy: "
+ std::to_string(static_cast<int>(policy)));
}
bool load(handle src, bool) {
value = reinterpret_borrow<object>(src);
return true;
}
template <typename T>
using cast_op_type = PyObject *;
explicit operator PyObject *() { return value.ptr(); }
private:
object value;
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
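With this caster in scope, bound functions can take and return raw `PyObject *` directly; the main subtlety is choosing a `return_value_policy` that matches the kind of reference the C API call hands back. An illustrative sketch (module and function names are made up):

```cpp
#include <pybind11/pybind11.h>
#include <pybind11/type_caster_pyobject_ptr.h>
#include <string>

PYBIND11_MODULE(example, m) {
    // PyList_New returns a new reference, so take_ownership passes it
    // through to Python without an extra incref.
    m.def("make_list", []() -> PyObject * { return PyList_New(0); },
          pybind11::return_value_policy::take_ownership);
    // Arguments arrive as borrowed references kept alive by the caster's
    // internal `object value` member for the duration of the call.
    m.def("type_name", [](PyObject *obj) {
        return std::string(Py_TYPE(obj)->tp_name);
    });
}
```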
|
=======================================================================================================================
SOURCE CODE FILE: typing.h
LINES: 1
SIZE: 7.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\pybind11\typing.h
ENCODING: utf-8
```h
/*
pybind11/typing.h: Convenience wrapper classes for basic Python types
with more explicit annotations.
Copyright (c) 2023 Dustin Spicuzza <[email protected]>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include "cast.h"
#include "pytypes.h"
#include <algorithm>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(typing)
/*
The following types can be used to direct pybind11-generated docstrings
to have more explicit types (e.g., `list[str]` instead of `list`).
Just use these in place of existing types.
There is no additional enforcement of types at runtime.
*/
template <typename... Types>
class Tuple : public tuple {
using tuple::tuple;
};
template <typename K, typename V>
class Dict : public dict {
using dict::dict;
};
template <typename T>
class List : public list {
using list::list;
};
template <typename T>
class Set : public set {
using set::set;
};
template <typename T>
class Iterable : public iterable {
using iterable::iterable;
};
template <typename T>
class Iterator : public iterator {
using iterator::iterator;
};
template <typename Signature>
class Callable;
template <typename Return, typename... Args>
class Callable<Return(Args...)> : public function {
using function::function;
};
template <typename T>
class Type : public type {
using type::type;
};
template <typename... Types>
class Union : public object {
PYBIND11_OBJECT_DEFAULT(Union, object, PyObject_Type)
using object::object;
};
template <typename T>
class Optional : public object {
PYBIND11_OBJECT_DEFAULT(Optional, object, PyObject_Type)
using object::object;
};
template <typename T>
class TypeGuard : public bool_ {
using bool_::bool_;
};
template <typename T>
class TypeIs : public bool_ {
using bool_::bool_;
};
class NoReturn : public none {
using none::none;
};
class Never : public none {
using none::none;
};
#if defined(__cpp_nontype_template_args) && __cpp_nontype_template_args >= 201911L
# define PYBIND11_TYPING_H_HAS_STRING_LITERAL
template <size_t N>
struct StringLiteral {
constexpr StringLiteral(const char (&str)[N]) { std::copy_n(str, N, name); }
char name[N];
};
template <StringLiteral... StrLits>
class Literal : public object {
PYBIND11_OBJECT_DEFAULT(Literal, object, PyObject_Type)
};
// Example syntax for creating a TypeVar.
// typedef typing::TypeVar<"T"> TypeVarT;
template <StringLiteral>
class TypeVar : public object {
PYBIND11_OBJECT_DEFAULT(TypeVar, object, PyObject_Type)
using object::object;
};
#endif
PYBIND11_NAMESPACE_END(typing)
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename... Types>
struct handle_type_name<typing::Tuple<Types...>> {
static constexpr auto name = const_name("tuple[")
+ ::pybind11::detail::concat(make_caster<Types>::name...)
+ const_name("]");
};
template <>
struct handle_type_name<typing::Tuple<>> {
// PEP 484 specifies this syntax for an empty tuple
static constexpr auto name = const_name("tuple[()]");
};
template <typename T>
struct handle_type_name<typing::Tuple<T, ellipsis>> {
// PEP 484 specifies this syntax for a variable-length tuple
static constexpr auto name
= const_name("tuple[") + make_caster<T>::name + const_name(", ...]");
};
template <typename K, typename V>
struct handle_type_name<typing::Dict<K, V>> {
static constexpr auto name = const_name("dict[") + make_caster<K>::name + const_name(", ")
+ make_caster<V>::name + const_name("]");
};
template <typename T>
struct handle_type_name<typing::List<T>> {
static constexpr auto name = const_name("list[") + make_caster<T>::name + const_name("]");
};
template <typename T>
struct handle_type_name<typing::Set<T>> {
static constexpr auto name = const_name("set[") + make_caster<T>::name + const_name("]");
};
template <typename T>
struct handle_type_name<typing::Iterable<T>> {
static constexpr auto name = const_name("Iterable[") + make_caster<T>::name + const_name("]");
};
template <typename T>
struct handle_type_name<typing::Iterator<T>> {
static constexpr auto name = const_name("Iterator[") + make_caster<T>::name + const_name("]");
};
template <typename Return, typename... Args>
struct handle_type_name<typing::Callable<Return(Args...)>> {
using retval_type = conditional_t<std::is_same<Return, void>::value, void_type, Return>;
static constexpr auto name
= const_name("Callable[[") + ::pybind11::detail::concat(make_caster<Args>::name...)
+ const_name("], ") + make_caster<retval_type>::name + const_name("]");
};
template <typename Return>
struct handle_type_name<typing::Callable<Return(ellipsis)>> {
// PEP 484 specifies this syntax for defining only return types of callables
using retval_type = conditional_t<std::is_same<Return, void>::value, void_type, Return>;
static constexpr auto name
= const_name("Callable[..., ") + make_caster<retval_type>::name + const_name("]");
};
template <typename T>
struct handle_type_name<typing::Type<T>> {
static constexpr auto name = const_name("type[") + make_caster<T>::name + const_name("]");
};
template <typename... Types>
struct handle_type_name<typing::Union<Types...>> {
static constexpr auto name = const_name("Union[")
+ ::pybind11::detail::concat(make_caster<Types>::name...)
+ const_name("]");
};
template <typename T>
struct handle_type_name<typing::Optional<T>> {
static constexpr auto name = const_name("Optional[") + make_caster<T>::name + const_name("]");
};
template <typename T>
struct handle_type_name<typing::TypeGuard<T>> {
static constexpr auto name = const_name("TypeGuard[") + make_caster<T>::name + const_name("]");
};
template <typename T>
struct handle_type_name<typing::TypeIs<T>> {
static constexpr auto name = const_name("TypeIs[") + make_caster<T>::name + const_name("]");
};
template <>
struct handle_type_name<typing::NoReturn> {
static constexpr auto name = const_name("NoReturn");
};
template <>
struct handle_type_name<typing::Never> {
static constexpr auto name = const_name("Never");
};
#if defined(PYBIND11_TYPING_H_HAS_STRING_LITERAL)
template <typing::StringLiteral... Literals>
struct handle_type_name<typing::Literal<Literals...>> {
static constexpr auto name = const_name("Literal[")
+ pybind11::detail::concat(const_name(Literals.name)...)
+ const_name("]");
};
template <typing::StringLiteral StrLit>
struct handle_type_name<typing::TypeVar<StrLit>> {
static constexpr auto name = const_name(StrLit.name);
};
#endif
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
```
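Since these wrappers only influence the generated signature text, they are drop-in replacements for plain `py::list`/`py::dict` parameters. A minimal sketch (the module name `example` is illustrative):

```cpp
#include <pybind11/pybind11.h>
#include <pybind11/typing.h>

namespace py = pybind11;

PYBIND11_MODULE(example, m) {
    // Docstring renders as: describe(values: list[int]) -> dict[str, int]
    m.def("describe", [](const py::typing::List<py::int_> &values) {
        py::typing::Dict<py::str, py::int_> out;
        out["count"] = values.size();  // no runtime check on element types
        return out;
    });
}
```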
|
=============================================================================================================
SOURCE CODE FILE: sleef.h
LINES: 1
SIZE: 267.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\sleef.h
ENCODING: utf-8
```h
// Copyright Naoki Shibata and contributors 2010 - 2024.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef __SLEEF_H__
#define __SLEEF_H__
#define SLEEF_VERSION_MAJOR 3
#define SLEEF_VERSION_MINOR 7
#define SLEEF_VERSION_PATCHLEVEL 0
#include <stddef.h>
#include <stdint.h>
#if defined (__GNUC__) || defined (__clang__) || defined(__INTEL_COMPILER)
#define SLEEF_CONST __attribute__((const))
#define SLEEF_INLINE __attribute__((always_inline))
#elif defined(_MSC_VER)
#define SLEEF_CONST
#define SLEEF_INLINE __forceinline
#endif
#if defined(__AVX2__) || defined(__aarch64__) || defined(__arm__) || defined(__powerpc64__) || defined(__zarch__)
#ifndef FP_FAST_FMA
#define FP_FAST_FMA
#endif
#ifndef FP_FAST_FMAF
#define FP_FAST_FMAF
#endif
#endif
#if defined(_MSC_VER) && !defined(__STDC__)
#define __STDC__ 1
#endif
#if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS)
#ifdef SLEEF_IMPORT_IS_EXPORT
#define SLEEF_IMPORT __declspec(dllexport)
#else // #ifdef SLEEF_IMPORT_IS_EXPORT
#define SLEEF_IMPORT __declspec(dllimport)
#if (defined(_MSC_VER))
#pragma comment(lib,"sleef.lib")
#endif // #if (defined(_MSC_VER))
#endif // #ifdef SLEEF_IMPORT_IS_EXPORT
#else // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS)
#define SLEEF_IMPORT
#endif // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS)
#if (defined(__GNUC__) || defined(__CLANG__)) && (defined(__i386__) || defined(__x86_64__))
#include <x86intrin.h>
#endif
#if (defined(_MSC_VER))
#include <intrin.h>
#endif
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
#if defined(__ARM_FEATURE_SVE)
#include <arm_sve.h>
#endif
#if defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__)
#include <altivec.h>
typedef __vector double SLEEF_VECTOR_DOUBLE;
typedef __vector float SLEEF_VECTOR_FLOAT;
typedef __vector int SLEEF_VECTOR_INT;
typedef __vector unsigned int SLEEF_VECTOR_UINT;
typedef __vector long long SLEEF_VECTOR_LONGLONG;
typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG;
#endif
#if defined(__VX__) && defined(__VEC__)
#ifndef SLEEF_VECINTRIN_H_INCLUDED
#include <vecintrin.h>
#define SLEEF_VECINTRIN_H_INCLUDED
#endif
typedef __vector double SLEEF_VECTOR_DOUBLE;
typedef __vector float SLEEF_VECTOR_FLOAT;
typedef __vector int SLEEF_VECTOR_INT;
typedef __vector unsigned int SLEEF_VECTOR_UINT;
typedef __vector long long SLEEF_VECTOR_LONGLONG;
typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG;
#endif
//
#if defined(SLEEF_ENABLE_OMP_SIMD) && (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER)
#if defined(__aarch64__)
//#define SLEEF_PRAGMA_OMP_SIMD_DP _Pragma ("omp declare simd simdlen(2) notinbranch")
//#define SLEEF_PRAGMA_OMP_SIMD_SP _Pragma ("omp declare simd simdlen(4) notinbranch")
//#elif defined(__x86_64__) && defined(__AVX512F__)
//#define SLEEF_PRAGMA_OMP_SIMD_DP _Pragma ("omp declare simd simdlen(8) notinbranch")
//#define SLEEF_PRAGMA_OMP_SIMD_SP _Pragma ("omp declare simd simdlen(16) notinbranch")
#elif defined(__x86_64__) && defined(__AVX__)
#define SLEEF_PRAGMA_OMP_SIMD_DP _Pragma ("omp declare simd simdlen(4) notinbranch")
#define SLEEF_PRAGMA_OMP_SIMD_SP _Pragma ("omp declare simd simdlen(8) notinbranch")
#elif defined(__x86_64__) && defined(__SSE2__)
#define SLEEF_PRAGMA_OMP_SIMD_DP _Pragma ("omp declare simd simdlen(2) notinbranch")
#define SLEEF_PRAGMA_OMP_SIMD_SP _Pragma ("omp declare simd simdlen(4) notinbranch")
#endif
#endif
#ifndef SLEEF_PRAGMA_OMP_SIMD_DP
#define SLEEF_PRAGMA_OMP_SIMD_DP
#define SLEEF_PRAGMA_OMP_SIMD_SP
#endif
//
#ifndef SLEEF_FP_ILOGB0
#define SLEEF_FP_ILOGB0 ((int)0x80000000)
#endif
#ifndef SLEEF_FP_ILOGBNAN
#define SLEEF_FP_ILOGBNAN ((int)2147483647)
#endif
//
SLEEF_IMPORT void *Sleef_malloc(size_t z);
SLEEF_IMPORT void Sleef_free(void *ptr);
SLEEF_IMPORT uint64_t Sleef_currentTimeMicros();
#if defined(__i386__) || defined(__x86_64__) || defined(_MSC_VER)
SLEEF_IMPORT void Sleef_x86CpuID(int32_t out[4], uint32_t eax, uint32_t ecx);
#endif
//
#if defined(__riscv_v)
#include <riscv_vector.h>
typedef vfloat64m2_t Sleef_vfloat64m1_t_2;
typedef vfloat32m2_t Sleef_vfloat32m1_t_2;
typedef vfloat64m4_t Sleef_vfloat64m2_t_2;
typedef vfloat32m4_t Sleef_vfloat32m2_t_2;
#define Sleef_vfloat64m1_t_2_DEFINED
#define Sleef_vfloat32m1_t_2_DEFINED
#define Sleef_vfloat64m2_t_2_DEFINED
#define Sleef_vfloat32m2_t_2_DEFINED
#endif
#ifndef Sleef_double2_DEFINED
#define Sleef_double2_DEFINED
typedef struct {
double x, y;
} Sleef_double2;
#endif
#ifndef Sleef_float2_DEFINED
#define Sleef_float2_DEFINED
typedef struct {
float x, y;
} Sleef_float2;
#endif
#ifndef Sleef_longdouble2_DEFINED
#define Sleef_longdouble2_DEFINED
typedef struct {
long double x, y;
} Sleef_longdouble2;
#endif
#if (defined(__SIZEOF_FLOAT128__) && __SIZEOF_FLOAT128__ == 16) || (defined(__linux__) && defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) || (defined(__PPC64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 8)
#define SLEEF_FLOAT128_IS_IEEEQP
#endif
#if !defined(SLEEF_FLOAT128_IS_IEEEQP) && defined(__SIZEOF_LONG_DOUBLE__) && __SIZEOF_LONG_DOUBLE__ == 16 && (defined(__aarch64__) || defined(__zarch__))
#define SLEEF_LONGDOUBLE_IS_IEEEQP
#endif
#if !defined(Sleef_quad_DEFINED)
#define Sleef_quad_DEFINED
typedef struct { uint64_t x, y; } Sleef_uint64_2t;
#if defined(SLEEF_FLOAT128_IS_IEEEQP) || defined(ENABLEFLOAT128)
typedef __float128 Sleef_quad;
#define SLEEF_QUAD_C(x) (x ## Q)
#elif defined(SLEEF_LONGDOUBLE_IS_IEEEQP)
typedef long double Sleef_quad;
#define SLEEF_QUAD_C(x) (x ## L)
#else
typedef Sleef_uint64_2t Sleef_quad;
#endif
#endif
#if !defined(Sleef_quad2_DEFINED)
#define Sleef_quad2_DEFINED
typedef union {
struct {
Sleef_quad x, y;
};
Sleef_quad s[2];
} Sleef_quad2;
#endif
#ifdef __cplusplus
extern "C"
{
#endif
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sin_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cos_u35(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_sincos_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tan_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asin_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acos_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan2_u35(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cbrt_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sin_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cos_u10(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_sincos_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tan_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asin_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acos_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan2_u10(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cbrt_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_pow_u10(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinh_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cosh_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tanh_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinh_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cosh_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tanh_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asinh_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acosh_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atanh_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp2_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp10_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp2_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp10_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_expm1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log10_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log2_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log2_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log1p_u10(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_sincospi_u05(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_sincospi_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinpi_u05(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cospi_u05(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_ldexp(double, int);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST int Sleef_ilogb(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fma(double, double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrt(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrt_u05(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrt_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_hypot_u05(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_hypot_u35(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fabs(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_copysign(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmax(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmin(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fdim(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_trunc(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_floor(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_ceil(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_round(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_rint(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_nextafter(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_frfrexp(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST int Sleef_expfrexp(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmod(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_remainder(double, double);
SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_modf(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_lgamma_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tgamma_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_erf_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_erfc_u15(double);
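/* Editor's note (not part of the upstream header): the scalar entry points
 * above follow the naming pattern Sleef_<func>_u<n>, where the suffix gives
 * the maximum error bound in ULPs scaled by 10 (u05 = 0.5, u10 = 1.0,
 * u35 = 3.5). Minimal use, assuming the program links against libsleef:
 *
 *   #include <sleef.h>
 *   double s = Sleef_sin_u10(1.0);             // sin(1.0), <= 1.0 ULP
 *   Sleef_double2 sc = Sleef_sincos_u35(1.0);  // sc.x = sin, sc.y = cos
 */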
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cosf_u35(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_sincosf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acosf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f_u35(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_logf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cosf_u10(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_sincosf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastsinf_u3500(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastcosf_u3500(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acosf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f_u10(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_logf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_expf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_powf_u10(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastpowf_u3500(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_coshf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_coshf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinhf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acoshf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanhf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_expm1f_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log10f_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log2f_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log2f_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log1pf_u10(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_sincospif_u05(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_sincospif_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinpif_u05(float d);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cospif_u05(float d);
SLEEF_IMPORT SLEEF_CONST float Sleef_ldexpf(float, int);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST int Sleef_ilogbf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmaf(float, float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf_u05(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf_u05(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf_u35(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fabsf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_copysignf(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmaxf(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fminf(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fdimf(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_truncf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_floorf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_ceilf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_roundf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_rintf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_nextafterf(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_frfrexpf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST int Sleef_expfrexpf(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmodf(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_remainderf(float, float);
SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_modff(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_lgammaf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tgammaf_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_erff_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_erfcf_u15(float);
SLEEF_IMPORT SLEEF_CONST Sleef_longdouble2 Sleef_sincospil_u05(long double);
SLEEF_IMPORT SLEEF_CONST Sleef_longdouble2 Sleef_sincospil_u35(long double);
#if defined(Sleef_quad2_DEFINED)
SLEEF_IMPORT SLEEF_CONST Sleef_quad2 Sleef_sincospiq_u05(Sleef_quad);
SLEEF_IMPORT SLEEF_CONST Sleef_quad2 Sleef_sincospiq_u35(Sleef_quad);
#endif
#ifdef __SSE2__
#ifndef Sleef___m128d_2_DEFINED
typedef struct {
__m128d x, y;
} Sleef___m128d_2;
#define Sleef___m128d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u35(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u10(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_powd2_u10(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastsind2_u3500(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastcosd2_u3500(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastpowd2_u3500(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asinhd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acoshd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atanhd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expm1d2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log10d2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log1pd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u05(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinpid2_u05(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cospid2_u05(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ldexpd2(__m128d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmad2(__m128d, __m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u05(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u35(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u05(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u35(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fabsd2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_copysignd2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmaxd2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmind2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fdimd2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_truncd2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_floord2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ceild2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_roundd2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_rintd2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_nextafterd2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_frfrexpd2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmodd2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_remainderd2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_modfd2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_lgammad2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tgammad2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfd2_u10(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfcd2_u15(__m128d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd2(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd2(int);
#ifndef Sleef___m128_2_DEFINED
typedef struct {
__m128 x, y;
} Sleef___m128_2;
#define Sleef___m128_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u35(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u10(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_powf4_u10(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastsinf4_u3500(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastcosf4_u3500(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastpowf4_u3500(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinhf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acoshf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanhf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expm1f4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log10f4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log1pf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u05(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinpif4_u05(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cospif4_u05(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaf4(__m128, __m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u05(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u35(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u05(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u35(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fabsf4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_copysignf4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaxf4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fminf4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fdimf4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_truncf4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_floorf4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_ceilf4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_roundf4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_rintf4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_nextafterf4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_frfrexpf4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmodf4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_remainderf4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_modff4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_lgammaf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tgammaf4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erff4_u10(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erfcf4_u15(__m128);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf4(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf4(int);
#endif
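/* Editor's note (not part of the upstream header): the unsuffixed vector
 * entry points above (e.g. Sleef_sind2_u35) are, in typical libsleef
 * builds, dispatchers that select the best available instruction set at
 * runtime; the ISA-suffixed variants below (..._u35sse2, ..._u35sse4) bind
 * to one ISA directly. Element-wise use on an SSE2 vector:
 *
 *   __m128d v = _mm_set_pd(2.0, 1.0);
 *   __m128d r = Sleef_sind2_u35(v);   // element-wise sin, <= 3.5 ULP
 */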
#ifdef __SSE2__
#ifndef Sleef___m128d_2_DEFINED
typedef struct {
__m128d x, y;
} Sleef___m128d_2;
#define Sleef___m128d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sind2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cosd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tand2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asind2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acosd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atand2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u35sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atan2d2_u35sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_logd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cbrtd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sind2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cosd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tand2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asind2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acosd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atand2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u10sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atan2d2_u10sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_logd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cbrtd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_expd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_powd2_u10sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_powd2_u10sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinhd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_coshd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tanhd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinhd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_coshd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tanhd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastsind2_u3500sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastsind2_u3500sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastcosd2_u3500sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastcosd2_u3500sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastpowd2_u3500sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastpowd2_u3500sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asinhd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asinhd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acoshd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acoshd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atanhd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atanhd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp2d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp2d2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp10d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp10d2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expm1d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_expm1d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log10d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log10d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log2d2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log2d2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log1pd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log1pd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u05sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinpid2_u05sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinpid2_u05sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cospid2_u05sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cospid2_u05sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ldexpd2_sse2(__m128d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_ldexpd2_sse2(__m128d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_ilogbd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmad2_sse2(__m128d, __m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmad2_sse2(__m128d, __m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u05sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_u05sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_u35sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u05sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_hypotd2_u05sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u35sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_hypotd2_u35sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fabsd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fabsd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_copysignd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_copysignd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmaxd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmaxd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmind2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmind2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fdimd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fdimd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_truncd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_truncd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_floord2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_floord2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ceild2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_ceild2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_roundd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_roundd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_rintd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_rintd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_nextafterd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_nextafterd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_frfrexpd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_frfrexpd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_expfrexpd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmodd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmodd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_remainderd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_remainderd2_sse2(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_modfd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_lgammad2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_lgammad2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tgammad2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tgammad2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_erfd2_u10sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfcd2_u15sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_erfcd2_u15sse2(__m128d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd2_sse2(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd2_sse2(int);
#ifndef Sleef___m128_2_DEFINED
typedef struct {
__m128 x, y;
} Sleef___m128_2;
#define Sleef___m128_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cosf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acosf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u35sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atan2f4_u35sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_logf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cbrtf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cosf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acosf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u10sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atan2f4_u10sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_logf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cbrtf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_expf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_powf4_u10sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_powf4_u10sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinhf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_coshf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanhf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinhf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_coshf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanhf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastsinf4_u3500sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastsinf4_u3500sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastcosf4_u3500sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastcosf4_u3500sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastpowf4_u3500sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastpowf4_u3500sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinhf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinhf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acoshf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acoshf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanhf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanhf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp2f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp2f4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp10f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp10f4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expm1f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_expm1f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log10f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log10f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log2f4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log2f4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log1pf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log1pf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u05sse2(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse2(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinpif4_u05sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinpif4_u05sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cospif4_u05sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cospif4_u05sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaf4_sse2(__m128, __m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmaf4_sse2(__m128, __m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u05sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_u05sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_u35sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u05sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_hypotf4_u05sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u35sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_hypotf4_u35sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fabsf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fabsf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_copysignf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_copysignf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaxf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmaxf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fminf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fminf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fdimf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fdimf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_truncf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_truncf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_floorf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_floorf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_ceilf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_ceilf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_roundf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_roundf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_rintf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_rintf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_nextafterf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_nextafterf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_frfrexpf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_frfrexpf4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmodf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmodf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_remainderf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_remainderf4_sse2(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_modff4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_modff4_sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_lgammaf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_lgammaf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tgammaf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tgammaf4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erff4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_erff4_u10sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erfcf4_u15sse2(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_erfcf4_u15sse2(__m128);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf4_sse2(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf4_sse2(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf4_sse2(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf4_sse2(int);
#endif
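/* Editorial sketch (not part of the upstream header, hedged): the entry
 * points above follow the pattern Sleef_<op><lanes>_u<err><isa>, where
 * <lanes> is the vector width, <err> appears to encode the maximum error
 * in units of 0.1 ULP (u05 = 0.5 ULP, u10 = 1.0 ULP, u35 = 3.5 ULP, and
 * u3500 = 350 ULP for the fast* family), and <isa> names the instruction
 * set the kernel targets. The Sleef_cinz_ twins appear to be the
 * deterministic (bit-reproducible) variants for non-FMA instruction
 * sets. A minimal illustrative call, assuming <sleef.h> is on the
 * include path and the program links against libsleef:
 *
 *   #include <emmintrin.h>
 *   #include <sleef.h>
 *
 *   __m128 x = _mm_set_ps(0.1f, 0.2f, 0.3f, 0.4f);
 *   __m128 t = Sleef_tanhf4_u35sse2(x);  // tanh of 4 floats, <= 3.5 ULP
 */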
#ifdef __SSE2__
#ifndef Sleef___m128d_2_DEFINED
typedef struct {
__m128d x, y;
} Sleef___m128d_2;
#define Sleef___m128d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sind2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cosd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tand2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asind2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acosd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atand2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u35sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atan2d2_u35sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_logd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cbrtd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sind2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cosd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tand2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asind2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acosd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atand2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u10sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atan2d2_u10sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_logd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cbrtd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_expd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_powd2_u10sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_powd2_u10sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinhd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_coshd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tanhd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinhd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_coshd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tanhd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastsind2_u3500sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastsind2_u3500sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastcosd2_u3500sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastcosd2_u3500sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastpowd2_u3500sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastpowd2_u3500sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asinhd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asinhd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acoshd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acoshd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atanhd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atanhd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp2d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp2d2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp10d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp10d2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expm1d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_expm1d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log10d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log10d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log2d2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log2d2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log1pd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log1pd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u05sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinpid2_u05sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinpid2_u05sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cospid2_u05sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cospid2_u05sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ldexpd2_sse4(__m128d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_ldexpd2_sse4(__m128d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_ilogbd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmad2_sse4(__m128d, __m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmad2_sse4(__m128d, __m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u05sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_u05sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_u35sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u05sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_hypotd2_u05sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u35sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_hypotd2_u35sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fabsd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fabsd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_copysignd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_copysignd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmaxd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmaxd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmind2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmind2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fdimd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fdimd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_truncd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_truncd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_floord2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_floord2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ceild2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_ceild2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_roundd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_roundd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_rintd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_rintd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_nextafterd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_nextafterd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_frfrexpd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_frfrexpd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_expfrexpd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmodd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmodd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_remainderd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_remainderd2_sse4(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_modfd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_lgammad2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_lgammad2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tgammad2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tgammad2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_erfd2_u10sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfcd2_u15sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_erfcd2_u15sse4(__m128d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd2_sse4(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd2_sse4(int);
#ifndef Sleef___m128_2_DEFINED
typedef struct {
__m128 x, y;
} Sleef___m128_2;
#define Sleef___m128_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cosf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acosf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u35sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atan2f4_u35sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_logf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cbrtf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cosf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acosf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u10sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atan2f4_u10sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_logf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cbrtf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_expf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_powf4_u10sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_powf4_u10sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinhf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_coshf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanhf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinhf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_coshf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanhf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastsinf4_u3500sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastsinf4_u3500sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastcosf4_u3500sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastcosf4_u3500sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastpowf4_u3500sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastpowf4_u3500sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinhf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinhf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acoshf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acoshf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanhf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanhf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp2f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp2f4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp10f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp10f4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expm1f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_expm1f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log10f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log10f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log2f4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log2f4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log1pf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log1pf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u05sse4(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse4(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinpif4_u05sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinpif4_u05sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cospif4_u05sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cospif4_u05sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaf4_sse4(__m128, __m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmaf4_sse4(__m128, __m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u05sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_u05sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_u35sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u05sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_hypotf4_u05sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u35sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_hypotf4_u35sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fabsf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fabsf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_copysignf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_copysignf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaxf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmaxf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fminf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fminf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fdimf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fdimf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_truncf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_truncf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_floorf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_floorf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_ceilf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_ceilf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_roundf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_roundf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_rintf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_rintf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_nextafterf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_nextafterf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_frfrexpf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_frfrexpf4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmodf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmodf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_remainderf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_remainderf4_sse4(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_modff4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_modff4_sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_lgammaf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_lgammaf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tgammaf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tgammaf4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erff4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_erff4_u10sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erfcf4_u15sse4(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_erfcf4_u15sse4(__m128);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf4_sse4(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf4_sse4(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf4_sse4(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf4_sse4(int);
#endif
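/* Editorial sketch (hedged): functions returning a Sleef___m128d_2 (or
 * Sleef___m128_2) pack two related results into one struct; for the
 * sincos family the x member appears to carry the sine and the y member
 * the cosine of each lane. Illustrative use, assuming SSE4-capable
 * hardware and a link against libsleef:
 *
 *   #include <emmintrin.h>
 *   #include <sleef.h>
 *
 *   __m128d a = _mm_set_pd(0.5, 1.5);
 *   Sleef___m128d_2 sc = Sleef_sincosd2_u10sse4(a);
 *   __m128d s = sc.x;  // sin(a), per lane
 *   __m128d c = sc.y;  // cos(a), per lane
 */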
#ifdef __AVX__
#ifndef Sleef___m256d_2_DEFINED
typedef struct {
__m256d x, y;
} Sleef___m256d_2;
#define Sleef___m256d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u35(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u10(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_powd4_u10(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastsind4_u3500(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastcosd4_u3500(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastpowd4_u3500(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asinhd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acoshd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atanhd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expm1d4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log10d4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log1pd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u05(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinpid4_u05(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cospid4_u05(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ldexpd4(__m256d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmad4(__m256d, __m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u05(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u35(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u05(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u35(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fabsd4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_copysignd4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmaxd4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmind4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fdimd4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_truncd4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_floord4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ceild4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_roundd4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_rintd4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_nextafterd4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_frfrexpd4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmodd4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_remainderd4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_modfd4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_lgammad4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tgammad4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfd4_u10(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfcd4_u15(__m256d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd4(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd4(int);
#ifndef Sleef___m256_2_DEFINED
typedef struct {
__m256 x, y;
} Sleef___m256_2;
#define Sleef___m256_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u35(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u10(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_powf8_u10(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastsinf8_u3500(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastcosf8_u3500(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastpowf8_u3500(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinhf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acoshf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanhf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expm1f8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log10f8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log1pf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u05(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinpif8_u05(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cospif8_u05(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaf8(__m256, __m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u05(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u35(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u05(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u35(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fabsf8(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_copysignf8(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaxf8(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fminf8(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fdimf8(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_truncf8(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_floorf8(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_ceilf8(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_roundf8(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_rintf8(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_nextafterf8(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_frfrexpf8(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmodf8(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_remainderf8(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_modff8(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_lgammaf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tgammaf8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erff8_u10(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erfcf8_u15(__m256);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf8(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf8(int);
#endif
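/* Editorial note (hedged): unlike the ISA-suffixed declarations below,
 * the Sleef_*d4 / *f8 names in the preceding block carry no instruction
 * set suffix; they appear to be the generic 256-bit entry points that
 * the library resolves to the best available implementation (AVX, FMA4,
 * AVX2, ...) at run time. Illustrative call, assuming AVX support:
 *
 *   #include <immintrin.h>
 *   #include <sleef.h>
 *
 *   __m256d x = _mm256_set1_pd(2.0);
 *   __m256d y = _mm256_set1_pd(10.0);
 *   __m256d p = Sleef_powd4_u10(x, y);  // 1024.0 in each lane, <= 1.0 ULP
 */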
#ifdef __AVX__
#ifndef Sleef___m256d_2_DEFINED
typedef struct {
__m256d x, y;
} Sleef___m256d_2;
#define Sleef___m256d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sind4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cosd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tand4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_asind4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_acosd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atand4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u35avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atan2d4_u35avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_logd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cbrtd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sind4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cosd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tand4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_asind4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_acosd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atand4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u10avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atan2d4_u10avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_logd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cbrtd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_expd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_powd4_u10avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_powd4_u10avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sinhd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_coshd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tanhd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sinhd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_coshd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tanhd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastsind4_u3500avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fastsind4_u3500avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastcosd4_u3500avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fastcosd4_u3500avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastpowd4_u3500avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fastpowd4_u3500avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asinhd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_asinhd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acoshd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_acoshd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atanhd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atanhd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_exp2d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_exp2d4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_exp10d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_exp10d4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expm1d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_expm1d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log10d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_log10d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_log2d4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_log2d4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log1pd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_log1pd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u05avx(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05avx(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinpid4_u05avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sinpid4_u05avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cospid4_u05avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cospid4_u05avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ldexpd4_avx(__m256d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_ldexpd4_avx(__m256d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_ilogbd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmad4_avx(__m256d, __m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fmad4_avx(__m256d, __m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sqrtd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u05avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sqrtd4_u05avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sqrtd4_u35avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u05avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_hypotd4_u05avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u35avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_hypotd4_u35avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fabsd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fabsd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_copysignd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_copysignd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmaxd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fmaxd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmind4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fmind4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fdimd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fdimd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_truncd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_truncd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_floord4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_floord4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ceild4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_ceild4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_roundd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_roundd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_rintd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_rintd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_nextafterd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_nextafterd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_frfrexpd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_frfrexpd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_expfrexpd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmodd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fmodd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_remainderd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_remainderd4_avx(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_modfd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_modfd4_avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_lgammad4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_lgammad4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tgammad4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tgammad4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_erfd4_u10avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfcd4_u15avx(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_erfcd4_u15avx(__m256d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd4_avx(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd4_avx(int);
#ifndef Sleef___m256_2_DEFINED
typedef struct {
__m256 x, y;
} Sleef___m256_2;
#define Sleef___m256_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sinf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cosf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tanf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_asinf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_acosf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atanf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u35avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atan2f8_u35avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_logf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cbrtf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sinf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cosf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tanf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_asinf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_acosf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atanf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u10avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atan2f8_u10avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_logf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cbrtf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_expf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_powf8_u10avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_powf8_u10avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sinhf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_coshf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tanhf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sinhf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_coshf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tanhf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastsinf8_u3500avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fastsinf8_u3500avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastcosf8_u3500avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fastcosf8_u3500avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastpowf8_u3500avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fastpowf8_u3500avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinhf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_asinhf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acoshf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_acoshf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanhf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atanhf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_exp2f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_exp2f8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_exp10f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_exp10f8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expm1f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_expm1f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log10f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_log10f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_log2f8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_log2f8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log1pf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_log1pf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u05avx(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05avx(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinpif8_u05avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sinpif8_u05avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cospif8_u05avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cospif8_u05avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaf8_avx(__m256, __m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fmaf8_avx(__m256, __m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sqrtf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u05avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sqrtf8_u05avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sqrtf8_u35avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u05avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_hypotf8_u05avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u35avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_hypotf8_u35avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fabsf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fabsf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_copysignf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_copysignf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaxf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fmaxf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fminf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fminf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fdimf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fdimf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_truncf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_truncf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_floorf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_floorf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_ceilf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_ceilf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_roundf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_roundf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_rintf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_rintf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_nextafterf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_nextafterf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_frfrexpf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_frfrexpf8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmodf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fmodf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_remainderf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_remainderf8_avx(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_modff8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_modff8_avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_lgammaf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_lgammaf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tgammaf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tgammaf8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erff8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_erff8_u10avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erfcf8_u15avx(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_erfcf8_u15avx(__m256);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf8_avx(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf8_avx(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf8_avx(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf8_avx(int);
#endif
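/* Editorial note (hedged): the block that follows declares the FMA4
 * variants; their reproducible counterparts use the Sleef_finz_ prefix
 * rather than Sleef_cinz_, which appears to distinguish deterministic
 * kernels built for FMA-capable instruction sets from the non-FMA ones
 * declared above. Illustrative call on FMA4 hardware:
 *
 *   __m256d r = Sleef_finz_sind4_u35fma4(_mm256_set1_pd(1.0));
 */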
#ifdef __AVX__
#ifndef Sleef___m256d_2_DEFINED
typedef struct {
__m256d x, y;
} Sleef___m256d_2;
#define Sleef___m256d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sind4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cosd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tand4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asind4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acosd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atand4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u35fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atan2d4_u35fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_logd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cbrtd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sind4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cosd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tand4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asind4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acosd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atand4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u10fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atan2d4_u10fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_logd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cbrtd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_expd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_powd4_u10fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_powd4_u10fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinhd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_coshd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tanhd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinhd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_coshd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tanhd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastsind4_u3500fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastsind4_u3500fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastcosd4_u3500fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastcosd4_u3500fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastpowd4_u3500fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastpowd4_u3500fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asinhd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asinhd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acoshd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acoshd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atanhd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atanhd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp2d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp2d4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp10d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp10d4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expm1d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_expm1d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log10d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log10d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log2d4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log2d4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log1pd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log1pd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u05fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinpid4_u05fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinpid4_u05fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cospid4_u05fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cospid4_u05fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ldexpd4_fma4(__m256d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_ldexpd4_fma4(__m256d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_ilogbd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmad4_fma4(__m256d, __m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmad4_fma4(__m256d, __m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u05fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_u05fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_u35fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u05fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_hypotd4_u05fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u35fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_hypotd4_u35fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fabsd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fabsd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_copysignd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_copysignd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmaxd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmaxd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmind4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmind4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fdimd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fdimd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_truncd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_truncd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_floord4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_floord4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ceild4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_ceild4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_roundd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_roundd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_rintd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_rintd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_nextafterd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_nextafterd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_frfrexpd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_frfrexpd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_expfrexpd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmodd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmodd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_remainderd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_remainderd4_fma4(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_modfd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_modfd4_fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_lgammad4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_lgammad4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tgammad4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tgammad4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_erfd4_u10fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfcd4_u15fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_erfcd4_u15fma4(__m256d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd4_fma4(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd4_fma4(int);
#ifndef Sleef___m256_2_DEFINED
typedef struct {
__m256 x, y;
} Sleef___m256_2;
#define Sleef___m256_2_DEFINED
#endif
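/* Single-precision counterparts follow (8 floats per __m256). For the
 * sincos-style entry points, the two-element struct above packs the sine
 * in x and the cosine in y, matching the order in the function name.
 */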
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cosf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincosf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acosf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u35fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atan2f8_u35fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_logf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cbrtf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cosf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincosf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acosf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u10fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atan2f8_u10fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_logf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cbrtf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_expf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_powf8_u10fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_powf8_u10fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinhf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_coshf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanhf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinhf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_coshf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanhf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastsinf8_u3500fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastsinf8_u3500fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastcosf8_u3500fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastcosf8_u3500fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastpowf8_u3500fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastpowf8_u3500fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinhf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinhf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acoshf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acoshf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanhf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanhf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp2f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp2f8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp10f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp10f8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expm1f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_expm1f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log10f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log10f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log2f8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log2f8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log1pf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log1pf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u05fma4(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincospif8_u05fma4(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincospif8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinpif8_u05fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinpif8_u05fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cospif8_u05fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cospif8_u05fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaf8_fma4(__m256, __m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmaf8_fma4(__m256, __m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u05fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_u05fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_u35fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u05fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_hypotf8_u05fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u35fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_hypotf8_u35fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fabsf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fabsf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_copysignf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_copysignf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaxf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmaxf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fminf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fminf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fdimf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fdimf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_truncf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_truncf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_floorf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_floorf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_ceilf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_ceilf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_roundf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_roundf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_rintf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_rintf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_nextafterf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_nextafterf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_frfrexpf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_frfrexpf8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmodf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmodf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_remainderf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_remainderf8_fma4(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_modff8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_modff8_fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_lgammaf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_lgammaf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tgammaf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tgammaf8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erff8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_erff8_u10fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erfcf8_u15fma4(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_erfcf8_u15fma4(__m256);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf8_fma4(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf8_fma4(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf8_fma4(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf8_fma4(int);
#endif
#ifdef __AVX__
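/* AVX2 variants. The guard is __AVX__ rather than __AVX2__, presumably
 * because the declarations only need the 256-bit vector types to exist;
 * verifying that the executing CPU actually supports AVX2 before calling
 * these remains the caller's responsibility.
 */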
#ifndef Sleef___m256d_2_DEFINED
typedef struct {
__m256d x, y;
} Sleef___m256d_2;
#define Sleef___m256d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sind4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cosd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tand4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asind4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acosd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atand4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u35avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atan2d4_u35avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_logd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cbrtd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sind4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cosd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tand4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asind4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acosd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atand4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u10avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atan2d4_u10avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_logd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cbrtd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_expd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_powd4_u10avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_powd4_u10avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinhd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_coshd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tanhd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinhd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_coshd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tanhd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastsind4_u3500avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastsind4_u3500avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastcosd4_u3500avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastcosd4_u3500avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastpowd4_u3500avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastpowd4_u3500avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asinhd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asinhd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acoshd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acoshd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atanhd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atanhd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp2d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp2d4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp10d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp10d4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expm1d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_expm1d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log10d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log10d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log2d4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log2d4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log1pd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log1pd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u05avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinpid4_u05avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinpid4_u05avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cospid4_u05avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cospid4_u05avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ldexpd4_avx2(__m256d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_ldexpd4_avx2(__m256d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_ilogbd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmad4_avx2(__m256d, __m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmad4_avx2(__m256d, __m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u05avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_u05avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_u35avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u05avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_hypotd4_u05avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u35avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_hypotd4_u35avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fabsd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fabsd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_copysignd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_copysignd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmaxd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmaxd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmind4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmind4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fdimd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fdimd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_truncd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_truncd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_floord4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_floord4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ceild4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_ceild4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_roundd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_roundd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_rintd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_rintd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_nextafterd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_nextafterd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_frfrexpd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_frfrexpd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_expfrexpd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmodd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmodd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_remainderd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_remainderd4_avx2(__m256d, __m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_modfd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_modfd4_avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_lgammad4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_lgammad4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tgammad4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tgammad4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_erfd4_u10avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfcd4_u15avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_erfcd4_u15avx2(__m256d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd4_avx2(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd4_avx2(int);
#ifndef Sleef___m256_2_DEFINED
typedef struct {
__m256 x, y;
} Sleef___m256_2;
#define Sleef___m256_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cosf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincosf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acosf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u35avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atan2f8_u35avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_logf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cbrtf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cosf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincosf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acosf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u10avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atan2f8_u10avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_logf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cbrtf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_expf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_powf8_u10avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_powf8_u10avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinhf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_coshf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanhf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinhf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_coshf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanhf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastsinf8_u3500avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastsinf8_u3500avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastcosf8_u3500avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastcosf8_u3500avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastpowf8_u3500avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastpowf8_u3500avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinhf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinhf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acoshf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acoshf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanhf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanhf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp2f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp2f8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp10f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp10f8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expm1f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_expm1f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log10f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log10f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log2f8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log2f8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log1pf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log1pf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u05avx2(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincospif8_u05avx2(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincospif8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinpif8_u05avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinpif8_u05avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cospif8_u05avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cospif8_u05avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaf8_avx2(__m256, __m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmaf8_avx2(__m256, __m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u05avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_u05avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_u35avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u05avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_hypotf8_u05avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u35avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_hypotf8_u35avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fabsf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fabsf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_copysignf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_copysignf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaxf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmaxf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fminf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fminf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fdimf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fdimf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_truncf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_truncf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_floorf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_floorf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_ceilf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_ceilf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_roundf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_roundf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_rintf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_rintf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_nextafterf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_nextafterf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_frfrexpf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_frfrexpf8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmodf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmodf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_remainderf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_remainderf8_avx2(__m256, __m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_modff8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_modff8_avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_lgammaf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_lgammaf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tgammaf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tgammaf8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erff8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_erff8_u10avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erfcf8_u15avx2(__m256);
SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_erfcf8_u15avx2(__m256);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf8_avx2(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf8_avx2(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf8_avx2(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf8_avx2(int);
#endif
#ifdef __SSE2__
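/* avx2128 variants: 128-bit (__m128 / __m128d) entry points implemented
 * with AVX2 instructions. As above, the __SSE2__ guard only requires the
 * 128-bit vector types to be declared, not that the CPU supports AVX2.
 */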
#ifndef Sleef___m128d_2_DEFINED
typedef struct {
__m128d x, y;
} Sleef___m128d_2;
#define Sleef___m128d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sind2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cosd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_sincosd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tand2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_asind2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_acosd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atand2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u35avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atan2d2_u35avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_logd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cbrtd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sind2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cosd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_sincosd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tand2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_asind2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_acosd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atand2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u10avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atan2d2_u10avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_logd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cbrtd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_expd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_powd2_u10avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_powd2_u10avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sinhd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_coshd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tanhd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sinhd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_coshd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tanhd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastsind2_u3500avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fastsind2_u3500avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastcosd2_u3500avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fastcosd2_u3500avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastpowd2_u3500avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fastpowd2_u3500avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asinhd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_asinhd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acoshd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_acoshd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atanhd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atanhd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_exp2d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_exp2d2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_exp10d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_exp10d2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expm1d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_expm1d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log10d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_log10d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_log2d2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_log2d2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log1pd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_log1pd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u05avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_sincospid2_u05avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_sincospid2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinpid2_u05avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sinpid2_u05avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cospid2_u05avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cospid2_u05avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ldexpd2_avx2128(__m128d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_ldexpd2_avx2128(__m128d, __m128i);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_ilogbd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmad2_avx2128(__m128d, __m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fmad2_avx2128(__m128d, __m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sqrtd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u05avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sqrtd2_u05avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sqrtd2_u35avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u05avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_hypotd2_u05avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u35avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_hypotd2_u35avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fabsd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fabsd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_copysignd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_copysignd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmaxd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fmaxd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmind2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fmind2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fdimd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fdimd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_truncd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_truncd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_floord2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_floord2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ceild2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_ceild2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_roundd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_roundd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_rintd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_rintd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_nextafterd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_nextafterd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_frfrexpd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_frfrexpd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_expfrexpd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmodd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fmodd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_remainderd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_remainderd2_avx2128(__m128d, __m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_modfd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_modfd2_avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_lgammad2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_lgammad2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tgammad2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tgammad2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_erfd2_u10avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfcd2_u15avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_erfcd2_u15avx2128(__m128d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd2_avx2128(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd2_avx2128(int);
#ifndef Sleef___m128_2_DEFINED
typedef struct {
__m128 x, y;
} Sleef___m128_2;
#define Sleef___m128_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_cosf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_sincosf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tanf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_asinf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_acosf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_atanf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u35avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_atan2f4_u35avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_logf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_cbrtf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_cosf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_sincosf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tanf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_asinf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_acosf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_atanf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u10avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_atan2f4_u10avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_logf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_cbrtf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_expf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_powf4_u10avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_powf4_u10avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinhf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_coshf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tanhf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinhf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_coshf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tanhf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastsinf4_u3500avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fastsinf4_u3500avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastcosf4_u3500avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fastcosf4_u3500avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastpowf4_u3500avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fastpowf4_u3500avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinhf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_asinhf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acoshf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_acoshf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanhf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_atanhf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_exp2f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_exp2f4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_exp10f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_exp10f4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expm1f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_expm1f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log10f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_log10f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_log2f4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_log2f4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log1pf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_log1pf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u05avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_sincospif4_u05avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_sincospif4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinpif4_u05avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinpif4_u05avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cospif4_u05avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_cospif4_u05avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaf4_avx2128(__m128, __m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fmaf4_avx2128(__m128, __m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sqrtf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u05avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sqrtf4_u05avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sqrtf4_u35avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u05avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_hypotf4_u05avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u35avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_hypotf4_u35avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fabsf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fabsf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_copysignf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_copysignf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaxf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fmaxf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fminf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fminf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fdimf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fdimf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_truncf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_truncf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_floorf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_floorf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_ceilf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_ceilf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_roundf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_roundf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_rintf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_rintf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_nextafterf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_nextafterf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_frfrexpf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_frfrexpf4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmodf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fmodf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_remainderf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_remainderf4_avx2128(__m128, __m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_modff4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_modff4_avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_lgammaf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_lgammaf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tgammaf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tgammaf4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erff4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_erff4_u10avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erfcf4_u15avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_erfcf4_u15avx2128(__m128);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf4_avx2128(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf4_avx2128(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf4_avx2128(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf4_avx2128(int);
#endif
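/* Note on SLEEF naming (a summary of the upstream convention; the
 * declarations themselves are authoritative): the trailing _uNN encodes the
 * maximum error bound in ULPs -- u05 = 0.5 ULP, u10 = 1.0 ULP, u15 = 1.5 ULP,
 * u35 = 3.5 ULP, and u3500 = 350 ULP for the fast* variants. The ISA suffix
 * (avx2128, avx512f, ...) names a concrete kernel, while same-named entries
 * without an ISA suffix are the generic entry points that dispatch to a
 * suitable kernel at run time. The Sleef_finz_ prefix marks SLEEF's
 * deterministic variants built for FMA-capable targets. */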
#ifdef __AVX512F__
#ifndef Sleef___m512d_2_DEFINED
typedef struct {
__m512d x, y;
} Sleef___m512d_2;
#define Sleef___m512d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u35(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u10(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_powd8_u10(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastsind8_u3500(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastcosd8_u3500(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastpowd8_u3500(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asinhd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acoshd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atanhd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expm1d8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log10d8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log1pd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u05(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinpid8_u05(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cospid8_u05(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ldexpd8(__m512d, __m256i);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_ilogbd8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmad8(__m512d, __m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u05(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u35(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u05(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u35(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fabsd8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_copysignd8(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmaxd8(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmind8(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fdimd8(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_truncd8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_floord8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ceild8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_roundd8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_rintd8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_nextafterd8(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_frfrexpd8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_expfrexpd8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmodd8(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_remainderd8(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_modfd8(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_lgammad8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tgammad8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfd8_u10(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfcd8_u15(__m512d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd8(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd8(int);
#ifndef Sleef___m512_2_DEFINED
typedef struct {
__m512 x, y;
} Sleef___m512_2;
#define Sleef___m512_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u35(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u10(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_powf16_u10(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastsinf16_u3500(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastcosf16_u3500(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastpowf16_u3500(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinhf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acoshf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanhf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expm1f16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log10f16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log1pf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u05(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinpif16_u05(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cospif16_u05(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaf16(__m512, __m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u05(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u35(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u05(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u35(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fabsf16(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_copysignf16(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaxf16(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fminf16(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fdimf16(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_truncf16(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_floorf16(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_ceilf16(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_roundf16(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_rintf16(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_nextafterf16(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_frfrexpf16(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmodf16(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_remainderf16(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_modff16(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_lgammaf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tgammaf16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erff16_u10(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erfcf16_u15(__m512);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf16(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf16(int);
#endif
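/* Minimal usage sketch (illustrative only, not part of this header): callers
 * pass raw SIMD vectors and, for sincos-style functions, unpack the returned
 * two-vector struct. Assumes an AVX-512F target and <immintrin.h>.
 *
 *   __m512d x  = _mm512_set1_pd(0.5);
 *   __m512d s  = Sleef_sind8_u10(x);            // sin(x), <= 1.0 ULP error
 *   Sleef___m512d_2 sc = Sleef_sincosd8_u10(x); // sine in .x, cosine in .y
 *   __m512d sn = sc.x, cs = sc.y;
 */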
#ifdef __AVX512F__
#ifndef Sleef___m512d_2_DEFINED
typedef struct {
__m512d x, y;
} Sleef___m512d_2;
#define Sleef___m512d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sind8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cosd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_finz_sincosd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tand8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_asind8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_acosd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_atand8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u35avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_atan2d8_u35avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_logd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cbrtd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sind8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cosd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tand8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_asind8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_acosd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_atand8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u10avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_atan2d8_u10avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_logd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cbrtd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_expd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_powd8_u10avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_powd8_u10avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sinhd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_coshd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tanhd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sinhd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_coshd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tanhd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastsind8_u3500avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fastsind8_u3500avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastcosd8_u3500avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fastcosd8_u3500avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastpowd8_u3500avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fastpowd8_u3500avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asinhd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_asinhd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acoshd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_acoshd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atanhd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_atanhd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_exp2d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_exp2d8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_exp10d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_exp10d8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expm1d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_expm1d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log10d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_log10d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_log2d8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_log2d8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log1pd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_log1pd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_finz_sincospid8_u05avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_finz_sincospid8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinpid8_u05avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sinpid8_u05avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cospid8_u05avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cospid8_u05avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ldexpd8_avx512f(__m512d, __m256i);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_ldexpd8_avx512f(__m512d, __m256i);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_ilogbd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_finz_ilogbd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmad8_avx512f(__m512d, __m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fmad8_avx512f(__m512d, __m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sqrtd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u05avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sqrtd8_u05avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sqrtd8_u35avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u05avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_hypotd8_u05avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u35avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_hypotd8_u35avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fabsd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fabsd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_copysignd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_copysignd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmaxd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fmaxd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmind8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fmind8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fdimd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fdimd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_truncd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_truncd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_floord8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_floord8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ceild8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_ceild8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_roundd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_roundd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_rintd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_rintd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_nextafterd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_nextafterd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_frfrexpd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_frfrexpd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_expfrexpd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_finz_expfrexpd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmodd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fmodd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_remainderd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_remainderd8_avx512f(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_modfd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_finz_modfd8_avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_lgammad8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_lgammad8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tgammad8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tgammad8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_erfd8_u10avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfcd8_u15avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_erfcd8_u15avx512f(__m512d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd8_avx512f(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd8_avx512f(int);
#ifndef Sleef___m512_2_DEFINED
typedef struct {
__m512 x, y;
} Sleef___m512_2;
#define Sleef___m512_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cosf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_sincosf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tanf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_asinf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_acosf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_atanf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u35avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_atan2f16_u35avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_logf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cbrtf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cosf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_sincosf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tanf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_asinf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_acosf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_atanf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u10avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_atan2f16_u10avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_logf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cbrtf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_expf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_powf16_u10avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_powf16_u10avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinhf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_coshf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tanhf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinhf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_coshf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tanhf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastsinf16_u3500avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fastsinf16_u3500avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastcosf16_u3500avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fastcosf16_u3500avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastpowf16_u3500avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fastpowf16_u3500avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinhf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_asinhf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acoshf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_acoshf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanhf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_atanhf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_exp2f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_exp2f16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_exp10f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_exp10f16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expm1f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_expm1f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log10f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_log10f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_log2f16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_log2f16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log1pf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_log1pf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u05avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_sincospif16_u05avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_sincospif16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinpif16_u05avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinpif16_u05avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cospif16_u05avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cospif16_u05avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaf16_avx512f(__m512, __m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fmaf16_avx512f(__m512, __m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sqrtf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u05avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sqrtf16_u05avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sqrtf16_u35avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u05avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_hypotf16_u05avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u35avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_hypotf16_u35avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fabsf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fabsf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_copysignf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_copysignf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaxf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fmaxf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fminf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fminf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fdimf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fdimf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_truncf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_truncf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_floorf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_floorf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_ceilf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_ceilf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_roundf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_roundf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_rintf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_rintf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_nextafterf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_nextafterf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_frfrexpf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_frfrexpf16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmodf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fmodf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_remainderf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_remainderf16_avx512f(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_modff16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_modff16_avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_lgammaf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_lgammaf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tgammaf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tgammaf16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erff16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_erff16_u10avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erfcf16_u15avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_erfcf16_u15avx512f(__m512);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf16_avx512f(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf16_avx512f(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf16_avx512f(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf16_avx512f(int);
#endif
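/* The avx512fnofma kernels below mirror the avx512f block above but are
 * built without fused multiply-add; their deterministic counterparts
 * accordingly carry the Sleef_cinz_ (non-FMA) prefix instead of
 * Sleef_finz_. */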
#ifdef __AVX512F__
#ifndef Sleef___m512d_2_DEFINED
typedef struct {
__m512d x, y;
} Sleef___m512d_2;
#define Sleef___m512d_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sind8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cosd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_tand8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_asind8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_acosd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atand8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u35avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atan2d8_u35avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_logd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cbrtd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sind8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cosd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_tand8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_asind8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_acosd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atand8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u10avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atan2d8_u10avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_logd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cbrtd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_expd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_powd8_u10avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_powd8_u10avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sinhd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_coshd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_tanhd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sinhd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_coshd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_tanhd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastsind8_u3500avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fastsind8_u3500avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastcosd8_u3500avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fastcosd8_u3500avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastpowd8_u3500avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fastpowd8_u3500avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asinhd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_asinhd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acoshd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_acoshd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atanhd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atanhd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_exp2d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_exp2d8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_exp10d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_exp10d8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expm1d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_expm1d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log10d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_log10d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_log2d8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_log2d8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log1pd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_log1pd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u05avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinpid8_u05avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sinpid8_u05avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cospid8_u05avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cospid8_u05avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ldexpd8_avx512fnofma(__m512d, __m256i);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_ldexpd8_avx512fnofma(__m512d, __m256i);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_ilogbd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_cinz_ilogbd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmad8_avx512fnofma(__m512d, __m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fmad8_avx512fnofma(__m512d, __m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sqrtd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u05avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sqrtd8_u05avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sqrtd8_u35avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u05avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_hypotd8_u05avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u35avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_hypotd8_u35avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fabsd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fabsd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_copysignd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_copysignd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmaxd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fmaxd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmind8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fmind8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fdimd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fdimd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_truncd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_truncd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_floord8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_floord8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ceild8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_ceild8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_roundd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_roundd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_rintd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_rintd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_nextafterd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_nextafterd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_frfrexpd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_frfrexpd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_expfrexpd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m256i Sleef_cinz_expfrexpd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmodd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fmodd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_remainderd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_remainderd8_avx512fnofma(__m512d, __m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_modfd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_modfd8_avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_lgammad8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_lgammad8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tgammad8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_tgammad8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_erfd8_u10avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfcd8_u15avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_erfcd8_u15avx512fnofma(__m512d);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd8_avx512fnofma(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd8_avx512fnofma(int);
#ifndef Sleef___m512_2_DEFINED
typedef struct {
__m512 x, y;
} Sleef___m512_2;
#define Sleef___m512_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_cosf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_sincosf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_tanf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_asinf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_acosf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atanf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u35avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atan2f16_u35avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_logf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_cbrtf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_cosf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_sincosf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_tanf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_asinf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_acosf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atanf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u10avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atan2f16_u10avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_logf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_cbrtf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_expf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_powf16_u10avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_powf16_u10avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinhf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_coshf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_tanhf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinhf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_coshf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_tanhf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastsinf16_u3500avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fastsinf16_u3500avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastcosf16_u3500avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fastcosf16_u3500avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastpowf16_u3500avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fastpowf16_u3500avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinhf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_asinhf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acoshf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_acoshf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanhf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atanhf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_exp2f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_exp2f16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_exp10f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_exp10f16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expm1f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_expm1f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log10f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_log10f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_log2f16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_log2f16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log1pf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_log1pf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u05avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_sincospif16_u05avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_sincospif16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinpif16_u05avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinpif16_u05avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cospif16_u05avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_cospif16_u05avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaf16_avx512fnofma(__m512, __m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fmaf16_avx512fnofma(__m512, __m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sqrtf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u05avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sqrtf16_u05avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sqrtf16_u35avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u05avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_hypotf16_u05avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u35avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_hypotf16_u35avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fabsf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fabsf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_copysignf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_copysignf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaxf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fmaxf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fminf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fminf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fdimf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fdimf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_truncf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_truncf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_floorf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_floorf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_ceilf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_ceilf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_roundf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_roundf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_rintf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_rintf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_nextafterf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_nextafterf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_frfrexpf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_frfrexpf16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmodf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fmodf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_remainderf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_remainderf16_avx512fnofma(__m512, __m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_modff16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_modff16_avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_lgammaf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_lgammaf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tgammaf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_tgammaf16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erff16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_erff16_u10avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erfcf16_u15avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_erfcf16_u15avx512fnofma(__m512);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf16_avx512fnofma(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf16_avx512fnofma(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf16_avx512fnofma(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf16_avx512fnofma(int);
#endif
#ifdef __STDC__
#ifndef Sleef_double_2_DEFINED
typedef Sleef_double2 Sleef_double_2;
#define Sleef_double_2_DEFINED
#endif
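/* The purec variants below are SLEEF's portable pure-C scalar kernels
 * (d1 = one double per call), serving as the lowest-common-denominator
 * dispatch target when no SIMD extension is available. */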
SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sind1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_cosd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_sincosd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tand1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_asind1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_acosd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atand1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u35purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atan2d1_u35purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_logd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_cbrtd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sind1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_cosd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_sincosd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tand1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_asind1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_acosd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atand1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u10purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atan2d1_u10purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_logd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_cbrtd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_expd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_expd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_powd1_u10purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_powd1_u10purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sinhd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_coshd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tanhd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sinhd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_coshd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tanhd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fastsind1_u3500purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fastsind1_u3500purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fastcosd1_u3500purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fastcosd1_u3500purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fastpowd1_u3500purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fastpowd1_u3500purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_asinhd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_asinhd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_acoshd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_acoshd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atanhd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atanhd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_exp2d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_exp2d1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_exp10d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_exp10d1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_expm1d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_expm1d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_log10d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_log10d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_log2d1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_log2d1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_log1pd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_log1pd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u05purec(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_sincospid1_u05purec(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_sincospid1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sinpid1_u05purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sinpid1_u05purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cospid1_u05purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_cospid1_u05purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_ldexpd1_purec(double, int32_t);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_ldexpd1_purec(double, int32_t);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_ilogbd1_purec(double);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_cinz_ilogbd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fmad1_purec(double, double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fmad1_purec(double, double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sqrtd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u05purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sqrtd1_u05purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sqrtd1_u35purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u05purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_hypotd1_u05purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u35purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_hypotd1_u35purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fabsd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fabsd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_copysignd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_copysignd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fmaxd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fmaxd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fmind1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fmind1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fdimd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fdimd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_truncd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_truncd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_floord1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_floord1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_ceild1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_ceild1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_roundd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_roundd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_rintd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_rintd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_nextafterd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_nextafterd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_frfrexpd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_frfrexpd1_purec(double);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_expfrexpd1_purec(double);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_cinz_expfrexpd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fmodd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fmodd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_remainderd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_remainderd1_purec(double, double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_modfd1_purec(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_modfd1_purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_lgammad1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_lgammad1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tgammad1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tgammad1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_erfd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_erfd1_u10purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_erfcd1_u15purec(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_erfcd1_u15purec(double);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd1_purec(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd1_purec(int);
#ifndef Sleef_float_2_DEFINED
typedef Sleef_float2 Sleef_float_2;
#define Sleef_float_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sinf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cosf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_sincosf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tanf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_asinf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_acosf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atanf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u35purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atan2f1_u35purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_logf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cbrtf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sinf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cosf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_sincosf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tanf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_asinf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_acosf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atanf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u10purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atan2f1_u10purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_logf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cbrtf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_expf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_expf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_powf1_u10purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_powf1_u10purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sinhf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_coshf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tanhf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sinhf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_coshf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tanhf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fastsinf1_u3500purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fastsinf1_u3500purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fastcosf1_u3500purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fastcosf1_u3500purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fastpowf1_u3500purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fastpowf1_u3500purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_asinhf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_asinhf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_acoshf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_acoshf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atanhf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atanhf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_exp2f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_exp2f1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_exp10f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_exp10f1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_expm1f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_expm1f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_log10f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_log10f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_log2f1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_log2f1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_log1pf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_log1pf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u05purec(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_sincospif1_u05purec(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_sincospif1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sinpif1_u05purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sinpif1_u05purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cospif1_u05purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cospif1_u05purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fmaf1_purec(float, float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fmaf1_purec(float, float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sqrtf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u05purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sqrtf1_u05purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sqrtf1_u35purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u05purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_hypotf1_u05purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u35purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_hypotf1_u35purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fabsf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fabsf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_copysignf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_copysignf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fmaxf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fmaxf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fminf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fminf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fdimf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fdimf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_truncf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_truncf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_floorf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_floorf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_ceilf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_ceilf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_roundf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_roundf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_rintf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_rintf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_nextafterf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_nextafterf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_frfrexpf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_frfrexpf1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fmodf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fmodf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_remainderf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_remainderf1_purec(float, float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_modff1_purec(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_modff1_purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_lgammaf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_lgammaf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tgammaf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tgammaf1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_erff1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_erff1_u10purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_erfcf1_u15purec(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_erfcf1_u15purec(float);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf1_purec(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf1_purec(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf1_purec(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf1_purec(int);
#endif
#ifdef __STDC__
#ifndef Sleef_double_2_DEFINED
typedef Sleef_double2 Sleef_double_2;
#define Sleef_double_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sind1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cosd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_sincosd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tand1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_asind1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_acosd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atand1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u35purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atan2d1_u35purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_logd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cbrtd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sind1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cosd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_sincosd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tand1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_asind1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_acosd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atand1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u10purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atan2d1_u10purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_logd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cbrtd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_expd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_expd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_powd1_u10purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_powd1_u10purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sinhd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_coshd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tanhd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sinhd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_coshd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tanhd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fastsind1_u3500purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fastsind1_u3500purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fastcosd1_u3500purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fastcosd1_u3500purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fastpowd1_u3500purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fastpowd1_u3500purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_asinhd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_asinhd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_acoshd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_acoshd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_atanhd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atanhd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_exp2d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_exp2d1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_exp10d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_exp10d1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_expm1d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_expm1d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_log10d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_log10d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_log2d1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_log2d1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_log1pd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_log1pd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u05purecfma(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_sincospid1_u05purecfma(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_sincospid1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sinpid1_u05purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sinpid1_u05purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_cospid1_u05purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cospid1_u05purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_ldexpd1_purecfma(double, int32_t);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_ldexpd1_purecfma(double, int32_t);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_ilogbd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_finz_ilogbd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fmad1_purecfma(double, double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fmad1_purecfma(double, double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sqrtd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u05purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sqrtd1_u05purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sqrtd1_u35purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u05purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_hypotd1_u05purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u35purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_hypotd1_u35purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fabsd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fabsd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_copysignd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_copysignd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fmaxd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fmaxd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fmind1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fmind1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fdimd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fdimd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_truncd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_truncd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_floord1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_floord1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_ceild1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_ceild1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_roundd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_roundd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_rintd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_rintd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_nextafterd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_nextafterd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_frfrexpd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_frfrexpd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_expfrexpd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_finz_expfrexpd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fmodd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fmodd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_remainderd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_remainderd1_purecfma(double, double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_modfd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_modfd1_purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_lgammad1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_lgammad1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_tgammad1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tgammad1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_erfd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_erfd1_u10purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_erfcd1_u15purecfma(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_finz_erfcd1_u15purecfma(double);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd1_purecfma(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd1_purecfma(int);
#ifndef Sleef_float_2_DEFINED
typedef Sleef_float2 Sleef_float_2;
#define Sleef_float_2_DEFINED
#endif
SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cosf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_sincosf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tanf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_asinf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_acosf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_atanf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u35purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_atan2f1_u35purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_logf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cbrtf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cosf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_sincosf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tanf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_asinf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_acosf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_atanf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u10purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_atan2f1_u10purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_logf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cbrtf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_expf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_expf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_powf1_u10purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_powf1_u10purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinhf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_coshf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tanhf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinhf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_coshf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tanhf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fastsinf1_u3500purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fastsinf1_u3500purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fastcosf1_u3500purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fastcosf1_u3500purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fastpowf1_u3500purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fastpowf1_u3500purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_asinhf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_asinhf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_acoshf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_acoshf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_atanhf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_atanhf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_exp2f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_exp2f1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_exp10f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_exp10f1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_expm1f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_expm1f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_log10f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_log10f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_log2f1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_log2f1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_log1pf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_log1pf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u05purecfma(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_sincospif1_u05purecfma(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_sincospif1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sinpif1_u05purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinpif1_u05purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_cospif1_u05purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cospif1_u05purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fmaf1_purecfma(float, float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fmaf1_purecfma(float, float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sqrtf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u05purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sqrtf1_u05purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sqrtf1_u35purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u05purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_hypotf1_u05purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u35purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_hypotf1_u35purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fabsf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fabsf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_copysignf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_copysignf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fmaxf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fmaxf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fminf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fminf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fdimf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fdimf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_truncf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_truncf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_floorf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_floorf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_ceilf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_ceilf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_roundf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_roundf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_rintf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_rintf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_nextafterf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_nextafterf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_frfrexpf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_frfrexpf1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_fmodf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fmodf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_remainderf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_remainderf1_purecfma(float, float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_modff1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_modff1_purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_lgammaf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_lgammaf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_tgammaf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tgammaf1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_erff1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_erff1_u10purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_erfcf1_u15purecfma(float);
SLEEF_IMPORT SLEEF_CONST float Sleef_finz_erfcf1_u15purecfma(float);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf1_purecfma(int);
SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf1_purecfma(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf1_purecfma(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf1_purecfma(int);
#endif
#ifdef __STDC__
#ifndef Sleef_double_2_DEFINED
typedef Sleef_double2 Sleef_double_2;
#define Sleef_double_2_DEFINED
#endif
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u35(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u35(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u10(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u10(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_expd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_powd1_u10(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fastsind1_u3500(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fastcosd1_u3500(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fastpowd1_u3500(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asinhd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acoshd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atanhd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_expm1d1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log10d1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log1pd1_u10(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u05(double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinpid1_u05(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cospid1_u05(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_ldexpd1(double, int32_t);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_ilogbd1(double);
SLEEF_IMPORT SLEEF_CONST double Sleef_fmad1(double, double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u05(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u35(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u05(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u35(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fabsd1(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_copysignd1(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmaxd1(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmind1(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fdimd1(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_truncd1(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_floord1(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_ceild1(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_roundd1(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_rintd1(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_nextafterd1(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_frfrexpd1(double);
SLEEF_IMPORT SLEEF_CONST int32_t Sleef_expfrexpd1(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmodd1(double, double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_remainderd1(double, double);
SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_modfd1(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_lgammad1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tgammad1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_erfd1_u10(double);
SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_erfcd1_u15(double);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd1(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd1(int);
#ifndef Sleef_float_2_DEFINED
typedef Sleef_float2 Sleef_float_2;
#define Sleef_float_2_DEFINED
#endif
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u35(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u35(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u10(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u10(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_expf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_powf1_u10(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastsinf1_u3500(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastcosf1_u3500(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastpowf1_u3500(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinhf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acoshf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanhf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_expm1f1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log10f1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log1pf1_u10(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u05(float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinpif1_u05(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cospif1_u05(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmaf1(float, float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u05(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u35(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u05(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u35(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fabsf1(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_copysignf1(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmaxf1(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fminf1(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fdimf1(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_truncf1(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_floorf1(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_ceilf1(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_roundf1(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_rintf1(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_nextafterf1(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_frfrexpf1(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmodf1(float, float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_remainderf1(float, float);
SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_modff1(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_lgammaf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tgammaf1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_erff1_u10(float);
SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_erfcf1_u15(float);
SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf1(int);
SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf1(int);
#endif
//
#ifdef __cplusplus
} // extern "C"
#endif
#endif // #ifndef __SLEEF_H__
```
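The declarations above follow SLEEF's systematic naming scheme: `d1`/`f1` are scalar double/float entry points, the `uNN` suffix encodes the maximum error bound in units of 0.1 ULP (`u10` ≈ 1.0 ULP, `u35` ≈ 3.5 ULP), and `purec`/`purecfma` select the plain-C implementation without or with FMA. As I read SLEEF's convention, the `cinz_`/`finz_` prefixes are the deterministic (bit-reproducible) variants for non-FMA and FMA targets, and the `SLEEF_PRAGMA_OMP_SIMD_DP`/`_SP` macros mark functions for OpenMP `declare simd` vectorization. A minimal usage sketch (not part of the header; assumes sleef.h is on the include path and the program links against libsleef):

```cpp
// Illustrative sketch of the scalar SLEEF API declared above.
#include <sleef.h>
#include <cstdio>

int main() {
  // "d1" = scalar double variant; "u10"/"u35" bound the error at
  // roughly 1.0 and 3.5 ULP respectively.
  double s = Sleef_sind1_u10(1.0); // sin(1.0), <= ~1.0 ULP error
  double t = Sleef_tand1_u35(1.0); // tan(1.0), <= ~3.5 ULP error

  // sincos variants return both results in one struct (x = sin, y = cos).
  Sleef_double2 sc = Sleef_sincosd1_u10(0.5);

  std::printf("sin=%f tan=%f sincos=(%f, %f)\n", s, t, sc.x, sc.y);
  return 0;
}
```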
|
===============================================================================================================================
SOURCE CODE FILE: CudaIPCTypes.h
LINES: 1
SIZE: 3.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\CudaIPCTypes.h
ENCODING: utf-8
```h
#pragma once
#ifdef USE_CUDA
#include <c10/core/Allocator.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/cuda/CUDAException.h>
#include <c10/util/Logging.h>
#include <cuda_runtime_api.h>
#include <torch/csrc/Export.h>
#include <cstddef>
namespace torch {
TORCH_CUDA_CU_API bool CudaIPCCollect();
struct CudaIPCReceivedData final {
CudaIPCReceivedData() = default;
explicit CudaIPCReceivedData(std::shared_ptr<void> shared_ptr)
: shared_ptr_(std::move(shared_ptr)) {}
std::shared_ptr<void> shared_ptr_;
};
struct CudaIPCSentData final {
std::string handle_;
uint64_t offset_;
uint64_t* counter_ptr_; // Reference counter shared memory block
at::DataPtr original_ptr_; // Original mem allocation
cudaEvent_t event_; // Sync cuEventDestroy
bool event_sync_required_;
at::Device device_;
CudaIPCSentData(
std::string handle,
uint64_t offset,
uint64_t* counter_ptr,
at::Device device);
~CudaIPCSentData();
uint64_t counter_value();
std::string handle() {
return handle_;
}
uint64_t offset() {
return offset_;
}
void set_original_ptr(at::DataPtr data_ptr) {
original_ptr_ = std::move(data_ptr);
}
};
TORCH_CUDA_CU_API at::DataPtr GetNewRefCountedSentData(
void* data,
at::Device device);
namespace {
inline constexpr int64_t CUDA_IPC_REF_COUNTER_FILE_SIZE = 10000;
inline constexpr int64_t CUDA_IPC_WARN_AFTER_X_BLOCKS_IN_LIMBO = 1000;
// It was determined empirically that CUDA (v10.1 and below) has a limit on
// the number of recorded blocking interprocess events; it is around ~22,000.
// To give ourselves leeway, we picked 1000, as it gives us enough events to
// share tensors effectively.
inline constexpr int64_t CUDA_IPC_MAXIMUM_EVENTS_TO_USE = 1000;
// All data blocks pending deletion that still have a non-zero reference counter go here
struct CudaIPCSentDataLimbo final {
~CudaIPCSentDataLimbo();
bool collect();
void add(std::unique_ptr<CudaIPCSentData> shared_block);
uint64_t size();
private:
// TODO: Could be changed to a FIFO in order to avoid a full traversal on
// every collect()
std::vector<std::unique_ptr<CudaIPCSentData>> shared_blocks_;
std::mutex limbo_mutex_;
};
struct CudaIPCRefCountersFile final {
CudaIPCRefCountersFile(
std::string handle,
uint64_t size,
at::DataPtr data_ptr)
: size_(size),
handle_(std::move(handle)),
refcounted_shared_mem_(std::move(data_ptr)) {}
uint64_t* counter_ptr() {
return static_cast<uint64_t*>(refcounted_shared_mem_.get()) + next_offset_;
}
void set_counter(uint64_t value) {
*counter_ptr() = value;
}
bool have_offsets() {
return next_offset_ < size_;
}
bool offsets_in_use() {
return used_slots_;
}
uint64_t get_offset() {
return next_offset_;
}
void rotate_offset() {
next_offset_++;
used_slots_++;
}
void return_offset(uint64_t offset /* unused */) {
used_slots_--;
}
std::string handle() {
return handle_;
}
private:
uint64_t next_offset_{0};
uint64_t size_;
uint64_t used_slots_{0};
std::string handle_;
at::DataPtr refcounted_shared_mem_;
};
} // namespace
} // namespace torch
namespace c10 {
namespace {
class CudaIPCCollectCallback : public FreeMemoryCallback {
public:
bool Execute() override {
return torch::CudaIPCCollect();
}
};
} // namespace
} // namespace c10
#endif
```
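One detail worth noting in `CudaIPCRefCountersFile` above: `rotate_offset()` only ever advances `next_offset_`, and `return_offset()` merely decrements `used_slots_` without recycling the slot. A counters file is therefore exhausted after `size_` allocations even if every offset was returned; `offsets_in_use()` only says whether the file can be dropped yet. A standalone toy (illustrative only, not torch code) that mirrors this bookkeeping:

```cpp
// Toy model of the slot bookkeeping in CudaIPCRefCountersFile:
// offsets are handed out monotonically and never reused;
// return_offset() only tracks liveness.
#include <cassert>
#include <cstdint>

struct SlotFile {
  uint64_t next_offset = 0;
  uint64_t used_slots = 0;
  uint64_t size;
  explicit SlotFile(uint64_t s) : size(s) {}
  bool have_offsets() const { return next_offset < size; }
  bool offsets_in_use() const { return used_slots != 0; }
  uint64_t get_offset() const { return next_offset; }
  void rotate_offset() { ++next_offset; ++used_slots; }
  void return_offset(uint64_t /*offset*/) { --used_slots; }
};

int main() {
  SlotFile f(2);
  uint64_t a = f.get_offset(); f.rotate_offset(); // slot 0
  uint64_t b = f.get_offset(); f.rotate_offset(); // slot 1
  f.return_offset(a);
  f.return_offset(b);
  // Every slot was returned, yet the file is exhausted: slots are not
  // recycled, which is presumably why a fresh counters file is allocated
  // once CUDA_IPC_REF_COUNTER_FILE_SIZE offsets have been consumed.
  assert(!f.offsets_in_use());
  assert(!f.have_offsets());
  return 0;
}
```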
|
=============================================================================================================================
SOURCE CODE FILE: DataLoader.h
LINES: 1
SIZE: 0.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\DataLoader.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables,modernize-avoid-c-arrays)
extern PyMethodDef DataLoaderMethods[];
```
|
=========================================================================================================================
SOURCE CODE FILE: Device.h
LINES: 1
SIZE: 0.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Device.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <ATen/Device.h>
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct TORCH_API THPDevice {
PyObject_HEAD
at::Device device;
};
TORCH_API extern PyTypeObject THPDeviceType;
inline bool THPDevice_Check(PyObject* obj) {
return Py_TYPE(obj) == &THPDeviceType;
}
TORCH_API PyObject* THPDevice_New(const at::Device& device);
TORCH_API void THPDevice_init(PyObject* module);
```
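A hedged round-trip sketch of the API above; the helper name is hypothetical and error handling is elided.

```cpp
// Wrap an at::Device, verify the wrapper's exact type, and read it back.
// THPDevice_New returns a new reference owned by the caller.
PyObject* roundtrip_device() {
  PyObject* py_dev = THPDevice_New(at::Device(at::kCPU));
  if (py_dev && THPDevice_Check(py_dev)) {
    at::Device unwrapped = reinterpret_cast<THPDevice*>(py_dev)->device;
    (void)unwrapped; // at::kCPU
  }
  return py_dev;
}
```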
|
====================================================================================================================================
SOURCE CODE FILE: DeviceAccelerator.h
LINES: 1
SIZE: 0.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\DeviceAccelerator.h
ENCODING: utf-8
```h
#include <ATen/DeviceAccelerator.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::accelerator {
void initModule(PyObject* module);
} // namespace torch::accelerator
```
|
========================================================================================================================
SOURCE CODE FILE: Dtype.h
LINES: 1
SIZE: 0.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Dtype.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
constexpr int DTYPE_NAME_LEN = 64;
struct TORCH_API THPDtype {
PyObject_HEAD
at::ScalarType scalar_type;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
char name[DTYPE_NAME_LEN + 1];
};
TORCH_API extern PyTypeObject THPDtypeType;
inline bool THPDtype_Check(PyObject* obj) {
return Py_TYPE(obj) == &THPDtypeType;
}
inline bool THPPythonScalarType_Check(PyObject* obj) {
return obj == (PyObject*)(&PyFloat_Type) ||
obj == (PyObject*)(&PyComplex_Type) || obj == (PyObject*)(&PyBool_Type) ||
obj == (PyObject*)(&PyLong_Type);
}
TORCH_API PyObject* THPDtype_New(
at::ScalarType scalar_type,
const std::string& name);
void THPDtype_init(PyObject* module);
```
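A hedged sketch of how the two checks might be combined when parsing a dtype argument; the helper name and the fallback mapping are illustrative assumptions, not the real binding logic.

```cpp
// Distinguish a torch.dtype wrapper from a bare Python scalar type (float,
// complex, bool, int) and produce an at::ScalarType either way.
at::ScalarType parse_dtype_arg(PyObject* obj) {
  if (THPDtype_Check(obj)) {
    return reinterpret_cast<THPDtype*>(obj)->scalar_type;
  }
  if (THPPythonScalarType_Check(obj)) {
    return at::kDouble; // illustrative default for Python's `float`
  }
  throw std::runtime_error("expected a torch.dtype or Python scalar type");
}
```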
|
===============================================================================================================================
SOURCE CODE FILE: DynamicTypes.h
LINES: 1
SIZE: 1.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\DynamicTypes.h
ENCODING: utf-8
```h
#pragma once
// Provides conversions between Python tensor objects and at::Tensor.
#include <torch/csrc/python_headers.h>
#include <ATen/Device.h>
#include <c10/core/Backend.h>
#include <c10/core/Layout.h>
#include <c10/core/ScalarType.h>
#include <c10/core/ScalarTypeToTypeMeta.h>
#include <torch/csrc/Export.h>
#include <memory>
#include <string>
struct THPDtype;
struct THPLayout;
namespace c10 {
struct Storage;
}
namespace torch {
void registerDtypeObject(THPDtype* dtype, at::ScalarType scalarType);
void registerLayoutObject(THPLayout* thp_layout, at::Layout layout);
TORCH_PYTHON_API PyObject* createPyObject(const at::Storage& storage);
TORCH_PYTHON_API at::Storage createStorage(PyObject* obj);
TORCH_PYTHON_API std::tuple<at::Storage, at::ScalarType, bool>
createStorageGetType(PyObject* obj);
TORCH_PYTHON_API bool isStorage(PyObject* obj);
// Both methods below return a borrowed reference!
TORCH_PYTHON_API THPDtype* getTHPDtype(at::ScalarType scalarType);
TORCH_PYTHON_API THPLayout* getTHPLayout(at::Layout layout);
} // namespace torch
```
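A hedged sketch of the conversions, with the borrowed-reference caveat made explicit; the function name is hypothetical and error handling is elided.

```cpp
// Round-trip a Storage through its Python wrapper. createPyObject returns a
// new reference; getTHPDtype returns a borrowed reference, so it must not be
// decref'd.
void storage_roundtrip(const at::Storage& storage) {
  PyObject* py_storage = torch::createPyObject(storage);
  if (py_storage && torch::isStorage(py_storage)) {
    at::Storage back = torch::createStorage(py_storage);
    (void)back;
  }
  Py_XDECREF(py_storage);
  THPDtype* dt = torch::getTHPDtype(at::kFloat); // borrowed; do not decref
  (void)dt;
}
```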
|
========================================================================================================================
SOURCE CODE FILE: Event.h
LINES: 1
SIZE: 0.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Event.h
ENCODING: utf-8
```h
#ifndef THP_EVENT_INC
#define THP_EVENT_INC
#include <c10/core/Event.h>
#include <torch/csrc/python_headers.h>
struct TORCH_API THPEvent {
PyObject_HEAD
c10::Event event;
};
TORCH_API extern PyTypeObject* THPEventClass;
TORCH_API extern PyTypeObject THPEventType;
TORCH_API void THPEvent_init(PyObject* module);
TORCH_API PyObject* THPEvent_new(
c10::DeviceType device_type,
c10::EventFlag flag);
inline bool THPEvent_Check(PyObject* obj) {
return THPEventClass && PyObject_IsInstance(obj, (PyObject*)THPEventClass);
}
#endif // THP_EVENT_INC
```
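A hedged sketch of constructing the backend-agnostic event wrapper; the flag choice and helper name are assumptions for illustration.

```cpp
// Create a THPEvent for the CPU backend and verify it with the isinstance
// check above. THPEvent_new returns a new reference.
PyObject* make_event_example() {
  PyObject* ev =
      THPEvent_new(c10::DeviceType::CPU, c10::EventFlag::PYTORCH_DEFAULT);
  if (ev && THPEvent_Check(ev)) {
    c10::Event& underlying = reinterpret_cast<THPEvent*>(ev)->event;
    (void)underlying;
  }
  return ev;
}
```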
|
=============================================================================================================================
SOURCE CODE FILE: Exceptions.h
LINES: 1
SIZE: 15.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Exceptions.h
ENCODING: utf-8
```h
#pragma once
#include <exception>
#include <memory>
#include <string>
#include <system_error>
#include <ATen/detail/FunctionTraits.h>
#include <c10/util/C++17.h>
#include <c10/util/Exception.h>
#include <c10/util/StringUtil.h>
#include <pybind11/pybind11.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/runtime/jit_exception.h>
#include <torch/csrc/utils/cpp_stacktraces.h>
#include <torch/csrc/utils/pybind.h>
#if defined(USE_DISTRIBUTED)
#include <torch/csrc/distributed/c10d/exception.h>
#endif
inline void PyErr_SetString(PyObject* type, const std::string& message) {
PyErr_SetString(type, message.c_str());
}
/// NOTE [ Conversion Cpp Python Warning ]
/// The warning handler cannot set python warnings immediately
/// as it requires acquiring the GIL (potential deadlock)
/// and would need to cleanly exit if the warning raised a
/// python error. To solve this, we buffer the warnings and
/// process them when we go back to python.
/// This requires the two try/catch blocks below to handle the
/// following cases:
/// - If there is no Error raised in the inner try/catch, the
/// buffered warnings are processed as python warnings.
/// - If they don't raise an error, the function proceeds with the
/// original return code.
/// - If any of them raise an error, the error is set (PyErr_*) and
/// the destructor will raise a cpp exception python_error() that
/// will be caught by the outer try/catch that will be able to change
/// the return value of the function to reflect the error.
/// - If an Error was raised in the inner try/catch, the inner try/catch
/// must set the python error. The buffered warnings are then
///     processed as cpp warnings as we cannot predict beforehand
/// whether a python warning will raise an error or not and we
/// cannot handle two errors at the same time.
/// This advanced handler will only be used in the current thread.
/// If any other thread is used, warnings will be processed as
/// cpp warnings.
#define HANDLE_TH_ERRORS \
try { \
torch::PyWarningHandler __enforce_warning_buffer; \
try {
#define _CATCH_GENERIC_ERROR(ErrorType, PythonErrorType, retstmnt) \
catch (const c10::ErrorType& e) { \
auto msg = torch::get_cpp_stacktraces_enabled() \
? e.what() \
: e.what_without_backtrace(); \
PyErr_SetString(PythonErrorType, torch::processErrorMsg(msg)); \
retstmnt; \
}
// Only catch torch-specific exceptions
#define CATCH_CORE_ERRORS(retstmnt) \
catch (python_error & e) { \
e.restore(); \
retstmnt; \
} \
catch (py::error_already_set & e) { \
e.restore(); \
retstmnt; \
} \
_CATCH_GENERIC_ERROR(IndexError, PyExc_IndexError, retstmnt) \
_CATCH_GENERIC_ERROR(ValueError, PyExc_ValueError, retstmnt) \
_CATCH_GENERIC_ERROR(TypeError, PyExc_TypeError, retstmnt) \
_CATCH_GENERIC_ERROR( \
NotImplementedError, PyExc_NotImplementedError, retstmnt) \
_CATCH_GENERIC_ERROR(SyntaxError, PyExc_SyntaxError, retstmnt) \
_CATCH_GENERIC_ERROR(LinAlgError, THPException_LinAlgError, retstmnt) \
_CATCH_GENERIC_ERROR( \
OutOfMemoryError, THPException_OutOfMemoryError, retstmnt) \
_CATCH_GENERIC_ERROR( \
DistBackendError, THPException_DistBackendError, retstmnt) \
_CATCH_GENERIC_ERROR( \
DistNetworkError, THPException_DistNetworkError, retstmnt) \
_CATCH_GENERIC_ERROR(DistStoreError, THPException_DistStoreError, retstmnt) \
_CATCH_GENERIC_ERROR(DistError, THPException_DistError, retstmnt) \
_CATCH_GENERIC_ERROR(Error, PyExc_RuntimeError, retstmnt) \
catch (torch::PyTorchError & e) { \
auto msg = torch::processErrorMsg(e.what()); \
PyErr_SetString(e.python_type(), msg); \
retstmnt; \
}
#define CATCH_TH_ERRORS(retstmnt) CATCH_CORE_ERRORS(retstmnt)
#define CATCH_ALL_ERRORS(retstmnt) \
CATCH_TH_ERRORS(retstmnt) \
catch (const std::exception& e) { \
auto msg = torch::processErrorMsg(e.what()); \
PyErr_SetString(PyExc_RuntimeError, msg); \
retstmnt; \
}
#define END_HANDLE_TH_ERRORS_PYBIND \
} \
catch (...) { \
__enforce_warning_buffer.set_in_exception(); \
throw; \
} \
} \
catch (py::error_already_set & e) { \
throw; \
} \
catch (py::builtin_exception & e) { \
throw; \
} \
catch (torch::jit::JITException & e) { \
throw; \
} \
catch (const std::exception& e) { \
torch::translate_exception_to_python(std::current_exception()); \
throw py::error_already_set(); \
}
#define END_HANDLE_TH_ERRORS_RET(retval) \
} \
catch (...) { \
__enforce_warning_buffer.set_in_exception(); \
throw; \
} \
} \
catch (const std::exception& e) { \
torch::translate_exception_to_python(std::current_exception()); \
return retval; \
}
#define END_HANDLE_TH_ERRORS END_HANDLE_TH_ERRORS_RET(nullptr)
extern PyObject *THPException_FatalError, *THPException_LinAlgError,
*THPException_OutOfMemoryError, *THPException_DistError,
*THPException_DistBackendError, *THPException_DistNetworkError,
*THPException_DistStoreError;
// Throwing this exception means that the Python error flags have already been
// set and control should be returned to the interpreter immediately.
struct python_error : public std::exception {
python_error() = default;
python_error(const python_error& other)
: type(other.type),
value(other.value),
traceback(other.traceback),
message(other.message) {
pybind11::gil_scoped_acquire gil;
Py_XINCREF(type);
Py_XINCREF(value);
Py_XINCREF(traceback);
}
python_error(python_error&& other) noexcept
: type(other.type),
value(other.value),
traceback(other.traceback),
message(std::move(other.message)) {
other.type = nullptr;
other.value = nullptr;
other.traceback = nullptr;
}
python_error& operator=(const python_error& other) = delete;
python_error& operator=(python_error&& other) = delete;
// NOLINTNEXTLINE(bugprone-exception-escape)
~python_error() override {
if (type || value || traceback) {
pybind11::gil_scoped_acquire gil;
Py_XDECREF(type);
Py_XDECREF(value);
Py_XDECREF(traceback);
}
}
const char* what() const noexcept override {
return message.c_str();
}
void build_message() {
// Ensure we have the GIL.
pybind11::gil_scoped_acquire gil;
// No errors should be set when we enter the function since PyErr_Fetch
// clears the error indicator.
TORCH_INTERNAL_ASSERT(!PyErr_Occurred());
// Default message.
message = "python_error";
// Try to retrieve the error message from the value.
if (value != nullptr) {
// Reference count should not be zero.
TORCH_INTERNAL_ASSERT(Py_REFCNT(value) > 0);
PyObject* pyStr = PyObject_Str(value);
if (pyStr != nullptr) {
PyObject* encodedString =
PyUnicode_AsEncodedString(pyStr, "utf-8", "strict");
if (encodedString != nullptr) {
char* bytes = PyBytes_AS_STRING(encodedString);
if (bytes != nullptr) {
// Set the message.
message = std::string(bytes);
}
Py_XDECREF(encodedString);
}
Py_XDECREF(pyStr);
}
}
// Clear any errors since we don't want to propagate errors for functions
// that are trying to build a string for the error message.
PyErr_Clear();
}
/** Saves the exception so that it can be re-thrown on a different thread */
inline void persist() {
if (type)
return; // Don't overwrite exceptions
// PyErr_Fetch overwrites the pointers
pybind11::gil_scoped_acquire gil;
Py_XDECREF(type);
Py_XDECREF(value);
Py_XDECREF(traceback);
PyErr_Fetch(&type, &value, &traceback);
build_message();
}
/** Sets the current Python error from this exception */
inline void restore() {
if (!type)
return;
// PyErr_Restore steals references
pybind11::gil_scoped_acquire gil;
Py_XINCREF(type);
Py_XINCREF(value);
Py_XINCREF(traceback);
PyErr_Restore(type, value, traceback);
}
PyObject* type{nullptr};
PyObject* value{nullptr};
PyObject* traceback{nullptr};
// Message to return to the user when 'what()' is invoked.
std::string message;
};
bool THPException_init(PyObject* module);
namespace torch {
// Set python current exception from a C++ exception
TORCH_PYTHON_API void translate_exception_to_python(const std::exception_ptr&);
TORCH_PYTHON_API std::string processErrorMsg(std::string str);
// Abstract base class for exceptions which translate to specific Python types
struct PyTorchError : public std::exception {
PyTorchError() = default;
PyTorchError(std::string msg_) : msg(std::move(msg_)) {}
virtual PyObject* python_type() = 0;
const char* what() const noexcept override {
return msg.c_str();
}
std::string msg;
};
// Declare a printf-like function on gcc & clang
// The compiler can then warn on invalid format specifiers
#ifdef __GNUC__
#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) \
__attribute__((format(printf, FORMAT_INDEX, VA_ARGS_INDEX)))
#else
#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX)
#endif
// Translates to Python TypeError
struct TypeError : public PyTorchError {
using PyTorchError::PyTorchError;
TORCH_PYTHON_API TypeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
PyObject* python_type() override {
return PyExc_TypeError;
}
};
// Translates to Python AttributeError
struct AttributeError : public PyTorchError {
AttributeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
PyObject* python_type() override {
return PyExc_AttributeError;
}
};
// ATen warning handler for Python
struct PyWarningHandler {
// Move actual handler into a separate class with a noexcept
// destructor. Otherwise, we need to force all WarningHandler
// subclasses to have a noexcept(false) destructor.
struct InternalHandler : at::WarningHandler {
~InternalHandler() override = default;
void process(const c10::Warning& warning) override;
std::vector<c10::Warning> warning_buffer_;
};
public:
/// See NOTE [ Conversion Cpp Python Warning ] for noexcept justification
TORCH_PYTHON_API PyWarningHandler() noexcept(true);
// NOLINTNEXTLINE(bugprone-exception-escape)
TORCH_PYTHON_API ~PyWarningHandler() noexcept(false);
/** Call if an exception has been thrown
 * Necessary to determine if it is safe to throw from the destructor since
* std::uncaught_exception is buggy on some platforms and generally
* unreliable across dynamic library calls.
*/
void set_in_exception() {
in_exception_ = true;
}
private:
InternalHandler internal_handler_;
at::WarningHandler* prev_handler_;
bool in_exception_;
};
namespace detail {
struct noop_gil_scoped_release {
// user-defined constructor (i.e. not defaulted) to avoid
// unused-variable warnings at usage sites of this class
// NOLINTNEXTLINE(modernize-use-equals-default)
noop_gil_scoped_release() {}
};
template <bool release_gil>
using conditional_gil_scoped_release = std::conditional_t<
release_gil,
pybind11::gil_scoped_release,
noop_gil_scoped_release>;
template <typename Func, size_t i>
using Arg = typename invoke_traits<Func>::template arg<i>::type;
template <typename Func, size_t... Is, bool release_gil>
auto wrap_pybind_function_impl_(
Func&& f,
std::index_sequence<Is...>,
std::bool_constant<release_gil>) {
namespace py = pybind11;
// f=f is needed to handle function references on older compilers
return [f = std::forward<Func>(f)](Arg<Func, Is>... args) {
HANDLE_TH_ERRORS
conditional_gil_scoped_release<release_gil> no_gil;
return std::invoke(f, std::forward<Arg<Func, Is>>(args)...);
END_HANDLE_TH_ERRORS_PYBIND
};
}
} // namespace detail
// Wrap a function with TH error and warning handling.
// Returns a function object suitable for registering with pybind11.
template <typename Func>
auto wrap_pybind_function(Func&& f) {
using traits = invoke_traits<Func>;
return torch::detail::wrap_pybind_function_impl_(
std::forward<Func>(f),
std::make_index_sequence<traits::arity>{},
std::false_type{});
}
// Wrap a function with TH error, warning handling and releases the GIL.
// Returns a function object suitable for registering with pybind11.
template <typename Func>
auto wrap_pybind_function_no_gil(Func&& f) {
using traits = invoke_traits<Func>;
return torch::detail::wrap_pybind_function_impl_(
std::forward<Func>(f),
std::make_index_sequence<traits::arity>{},
std::true_type{});
}
} // namespace torch
```
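The macro pair and the pybind11 wrappers above are easiest to read in context. A hedged sketch with a hypothetical `my_add` operator follows; the binding shape matches the NOTE's description of the two nested try/catch levels.

```cpp
// Plain-CPython binding: C++ exceptions thrown in the body are translated to
// Python exceptions, and warnings buffered by PyWarningHandler are flushed on
// exit. END_HANDLE_TH_ERRORS returns nullptr if an error was set.
static PyObject* THPModule_myAdd(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  TORCH_CHECK(args != nullptr, "expected an argument tuple"); // c10::Error
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// pybind11 binding: the wrappers apply the same error/warning machinery; the
// _no_gil variant also releases the GIL for the duration of the call.
int my_add(int a, int b) {
  return a + b;
}

void register_example(pybind11::module& m) {
  m.def("my_add", torch::wrap_pybind_function(my_add));
  m.def("my_add_no_gil", torch::wrap_pybind_function_no_gil(my_add));
}
```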
|
=========================================================================================================================
SOURCE CODE FILE: Export.h
LINES: 1
SIZE: 0.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Export.h
ENCODING: utf-8
```h
#pragma once
#include <c10/macros/Export.h>
#ifdef THP_BUILD_MAIN_LIB
#define TORCH_PYTHON_API C10_EXPORT
#else
#define TORCH_PYTHON_API C10_IMPORT
#endif
```
|
============================================================================================================================
SOURCE CODE FILE: Generator.h
LINES: 1
SIZE: 1.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Generator.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Generator.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct THPGenerator {
PyObject_HEAD
at::Generator cdata;
};
// Creates a new Python object wrapping the default at::Generator. The reference
// is borrowed. The caller should ensure that the at::Generator object's lifetime
// lasts at least as long as the Python wrapper's.
TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator(
const at::Generator& cdata);
#define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass)
TORCH_PYTHON_API extern PyObject* THPGeneratorClass;
bool THPGenerator_init(PyObject* module);
TORCH_PYTHON_API PyObject* THPGenerator_Wrap(const at::Generator& gen);
TORCH_PYTHON_API at::Generator THPGenerator_Unwrap(PyObject* state);
// Creates a new Python object for a Generator. The Generator must not already
// have a PyObject* associated with it.
PyObject* THPGenerator_NewWithVar(PyTypeObject* type, at::Generator gen);
```
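A hedged round-trip sketch; the helper name and the ownership comment are assumptions for illustration. Per the comment above, the default-generator wrapper borrows its reference, so the underlying Generator must outlive the Python object.

```cpp
// Wrap an at::Generator and unwrap it again. THPGenerator_Wrap is assumed to
// return a new reference owned by the caller.
PyObject* generator_roundtrip(const at::Generator& gen) {
  PyObject* py_gen = THPGenerator_Wrap(gen);
  if (py_gen && THPGenerator_Check(py_gen)) {
    at::Generator back = THPGenerator_Unwrap(py_gen);
    (void)back;
  }
  return py_gen;
}
```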
|
=========================================================================================================================
SOURCE CODE FILE: Layout.h
LINES: 1
SIZE: 0.60 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Layout.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <ATen/Layout.h>
#include <string>
const int LAYOUT_NAME_LEN = 64;
struct THPLayout {
PyObject_HEAD
at::Layout layout;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
char name[LAYOUT_NAME_LEN + 1];
};
TORCH_PYTHON_API extern PyTypeObject THPLayoutType;
inline bool THPLayout_Check(PyObject* obj) {
return Py_TYPE(obj) == &THPLayoutType;
}
PyObject* THPLayout_New(at::Layout layout, const std::string& name);
void THPLayout_init(PyObject* module);
```
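Layout here, and MemoryFormat and QScheme below, share the same fixed-size name-buffer pattern; a hedged sketch of the check-and-read idiom, assuming the buffer is NUL-terminated by THPLayout_New:

```cpp
// Return the cached name of a torch.layout wrapper, or nullptr for any other
// object.
const char* layout_name(PyObject* obj) {
  return THPLayout_Check(obj) ? reinterpret_cast<THPLayout*>(obj)->name
                              : nullptr;
}
```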
|
===============================================================================================================================
SOURCE CODE FILE: MemoryFormat.h
LINES: 1
SIZE: 0.69 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\MemoryFormat.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <c10/core/MemoryFormat.h>
#include <string>
const int MEMORY_FORMAT_NAME_LEN = 64;
struct THPMemoryFormat {
PyObject_HEAD
at::MemoryFormat memory_format;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
char name[MEMORY_FORMAT_NAME_LEN + 1];
};
TORCH_PYTHON_API extern PyTypeObject THPMemoryFormatType;
inline bool THPMemoryFormat_Check(PyObject* obj) {
return Py_TYPE(obj) == &THPMemoryFormatType;
}
PyObject* THPMemoryFormat_New(
at::MemoryFormat memory_format,
const std::string& name);
void THPMemoryFormat_init(PyObject* module);
```
|
=========================================================================================================================
SOURCE CODE FILE: Module.h
LINES: 1
SIZE: 0.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Module.h
ENCODING: utf-8
```h
#ifndef THP_MODULE_INC
#define THP_MODULE_INC
#define THP_STATELESS_ATTRIBUTE_NAME "_torch"
#endif
```
|
================================================================================================================================
SOURCE CODE FILE: PyInterpreter.h
LINES: 1
SIZE: 0.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\PyInterpreter.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/impl/PyInterpreter.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::detail {
TORCH_PYTHON_API py::handle getTorchApiFunction(const c10::OperatorHandle& op);
}
// TODO: Move these to a proper namespace
TORCH_PYTHON_API c10::impl::PyInterpreter* getPyInterpreter();
TORCH_PYTHON_API bool isMainPyInterpreter();
```
|
==========================================================================================================================
SOURCE CODE FILE: QScheme.h
LINES: 1
SIZE: 0.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\QScheme.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <c10/core/QScheme.h>
#include <string>
constexpr int QSCHEME_NAME_LEN = 64;
struct THPQScheme {
PyObject_HEAD
at::QScheme qscheme;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
char name[QSCHEME_NAME_LEN + 1];
};
TORCH_PYTHON_API extern PyTypeObject THPQSchemeType;
inline bool THPQScheme_Check(PyObject* obj) {
return Py_TYPE(obj) == &THPQSchemeType;
}
PyObject* THPQScheme_New(at::QScheme qscheme, const std::string& name);
void THPQScheme_init(PyObject* module);
```
|
=======================================================================================================================
SOURCE CODE FILE: Size.h
LINES: 1
SIZE: 0.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Size.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/python_headers.h>
#include <cstdint>
TORCH_PYTHON_API extern PyTypeObject THPSizeType;
#define THPSize_Check(obj) (Py_TYPE(obj) == &THPSizeType)
PyObject* THPSize_New(const torch::autograd::Variable& t);
PyObject* THPSize_NewFromSizes(int64_t dim, const int64_t* sizes);
PyObject* THPSize_NewFromSymSizes(const at::Tensor& t);
void THPSize_init(PyObject* module);
```
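A hedged sketch of building a torch.Size from raw extents; the helper name is hypothetical.

```cpp
// THPSize_NewFromSizes reads `dim` extents from the array; it is assumed to
// return a new reference, or nullptr with a Python error set on failure.
PyObject* make_size_example() {
  const int64_t sizes[] = {2, 3, 4};
  return THPSize_NewFromSizes(3, sizes);
}
```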
|
==========================================================================================================================
SOURCE CODE FILE: Storage.h
LINES: 1
SIZE: 1.59 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Storage.h
ENCODING: utf-8
```h
#ifndef THP_STORAGE_INC
#define THP_STORAGE_INC
#include <Python.h>
#include <c10/core/Storage.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/Types.h>
#define THPStorageStr "torch.UntypedStorage"
struct THPStorage {
PyObject_HEAD
c10::MaybeOwned<c10::Storage> cdata;
bool is_hermetic;
};
TORCH_PYTHON_API PyObject* THPStorage_Wrap(c10::Storage storage);
TORCH_PYTHON_API PyObject* THPStorage_NewWithStorage(
PyTypeObject* type,
c10::Storage _storage,
c10::impl::PyInterpreterStatus status,
bool allow_preexisting_pyobj = false);
TORCH_PYTHON_API extern PyTypeObject* THPStorageClass;
inline bool THPStorage_CheckTypeExact(PyTypeObject* tp) {
return tp == THPStorageClass;
}
inline bool THPStorage_CheckExact(PyObject* obj) {
return THPStorage_CheckTypeExact(Py_TYPE(obj));
}
inline bool THPStorage_Check(PyObject* obj) {
if (!THPStorageClass)
return false;
const auto result = PyObject_IsInstance(obj, (PyObject*)THPStorageClass);
if (result == -1)
throw python_error();
return result;
}
bool THPStorage_init(PyObject* module);
void THPStorage_postInit(PyObject* module);
void THPStorage_assertNotNull(THPStorage* storage);
TORCH_PYTHON_API void THPStorage_assertNotNull(PyObject* obj);
TORCH_PYTHON_API extern PyTypeObject THPStorageType;
inline const c10::Storage& THPStorage_Unpack(THPStorage* storage) {
return *storage->cdata;
}
inline const c10::Storage& THPStorage_Unpack(PyObject* obj) {
return THPStorage_Unpack(reinterpret_cast<THPStorage*>(obj));
}
#endif
```
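A hedged sketch of wrapping and unpacking; the helper name is hypothetical. Note that THPStorage_Check can throw python_error if the isinstance call itself fails, while the Unpack helpers do no checking at all.

```cpp
// Wrap a c10::Storage and read it back without copying. THPStorage_Wrap takes
// the storage by value; ownership comments are assumptions for illustration.
PyObject* storage_wrap_example(c10::Storage storage) {
  PyObject* py_storage = THPStorage_Wrap(std::move(storage));
  if (py_storage && THPStorage_CheckExact(py_storage)) {
    const c10::Storage& unpacked = THPStorage_Unpack(py_storage);
    (void)unpacked.nbytes();
  }
  return py_storage;
}
```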
|
=================================================================================================================================
SOURCE CODE FILE: StorageMethods.h
LINES: 1
SIZE: 0.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\StorageMethods.h
ENCODING: utf-8
```h
#ifndef THP_STORAGE_METHODS_INC
#define THP_STORAGE_METHODS_INC
#include <Python.h>
PyMethodDef* THPStorage_getMethods();
#endif
```
|