==============================================================================================================================================
SOURCE CODE FILE: combined_traceback.h
LINES: 1
SIZE: 2.47 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\combined_traceback.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/runtime/interpreter.h>
#include <torch/csrc/profiler/unwind/unwind.h>
namespace torch {
// struct that holds the result of symbolizing multiple tracebacks
// each traceback is a list of indices into all_frames
// (lots of Frames get duplicated across traces)
struct TORCH_API SymbolizedTracebacks {
std::vector<unwind::Frame> all_frames;
// index into all_frames, so that
// it is possible to dedupe frame objects in
// construction of python objects
std::vector<std::vector<uint64_t>> tracebacks;
};
struct TORCH_API CapturedTraceback : public c10::GatheredContext {
struct PyFrame {
void* code; // PyCodeObject*, but python headers not present
int lasti;
};
static std::shared_ptr<CapturedTraceback> gather(
bool python,
bool script,
bool cpp);
CapturedTraceback() = default;
CapturedTraceback(const CapturedTraceback&) = delete;
CapturedTraceback& operator=(const CapturedTraceback&) = delete;
CapturedTraceback(CapturedTraceback&&) noexcept = default;
CapturedTraceback& operator=(CapturedTraceback&&) noexcept = delete;
~CapturedTraceback() override;
using visitproc = int (*)(void* self, void* arg);
struct Python {
virtual std::vector<PyFrame> gather() = 0;
virtual void release(std::vector<PyFrame>& frames) = 0;
virtual void appendSymbolized(
const std::vector<PyFrame>& to_symbolize,
SymbolizedTracebacks& st) = 0;
// tp_traverse/tp_clear implementations
virtual int traverse(
std::vector<PyFrame>& frames,
visitproc visit,
void* arg) = 0;
virtual int clear(std::vector<PyFrame>& frames) = 0;
virtual ~Python() = default;
Python* next_ = nullptr;
};
// called once by each python interpreter to
// register python stack recording functionality
// p cannot be deleted once added.
static void addPythonUnwinder(Python* p);
int traversePython(visitproc visit, void* arg);
int clearPython();
private:
std::vector<PyFrame> frames_;
std::vector<void*> cpp_frames_;
std::vector<jit::StackEntry> script_frames_;
friend TORCH_API SymbolizedTracebacks
symbolize(const std::vector<CapturedTraceback*>& to_symbolize);
// non-owning reference to one of the immortal Python* objects
// registered above.
Python* python_ = nullptr;
};
TORCH_API SymbolizedTracebacks
symbolize(const std::vector<CapturedTraceback*>& to_symbolize);
} // namespace torch
```
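A minimal usage sketch for the API above: gather one traceback and print its deduplicated frames through the index lists in `SymbolizedTracebacks`. The `unwind::Frame` field names (`funcname`, `filename`, `lineno`) are assumed from `torch/csrc/profiler/unwind/unwind.h` and are not shown in this header.
```cpp
#include <iostream>
#include <torch/csrc/profiler/combined_traceback.h>

// Sketch only: capture a C++/TorchScript traceback at this point and symbolize it.
void dumpCurrentTraceback() {
  // python=false here because no Python unwinder is registered in a pure C++ build.
  std::shared_ptr<torch::CapturedTraceback> tb =
      torch::CapturedTraceback::gather(/*python=*/false, /*script=*/true, /*cpp=*/true);

  std::vector<torch::CapturedTraceback*> batch{tb.get()};
  torch::SymbolizedTracebacks st = torch::symbolize(batch);

  // Each entry of `tracebacks` indexes into the shared, deduplicated
  // `all_frames` pool rather than storing its own Frame copies.
  for (uint64_t idx : st.tracebacks.at(0)) {
    const auto& f = st.all_frames.at(idx);
    std::cout << f.funcname << " (" << f.filename << ":" << f.lineno << ")\n";
  }
}
```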
|
======================================================================================================================================
SOURCE CODE FILE: containers.h
LINES: 1
SIZE: 6.05 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\containers.h
ENCODING: utf-8
```h
#pragma once
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <forward_list>
#include <utility>
#include <c10/macros/Macros.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
namespace torch::profiler::impl {
// ============================================================================
// == AppendOnlyList ==========================================================
// ============================================================================
// During profiling, we have a very predictable access pattern: we only
// append to the end of the container. We can specialize and outperform both
// std::vector (which must realloc) and std::deque (which performs a double
// indirection), and this class of operation is sufficiently important to the
// profiling hot path to warrant specializing:
// https://godbolt.org/z/rTjozf1c4
// https://quick-bench.com/q/mmfuu71ogwaiULDCJyHdKnHZms4 (Prototype #1, int)
// https://quick-bench.com/q/5vWDW6jjdXVdoffev2zst8D09no (Prototype #1, int pair)
// https://quick-bench.com/q/IfEkfAQMeJSNBA52xtMP6Agcl-Q (Prototype #2, int pair)
// https://quick-bench.com/q/wJV2lKmuXL4XyGJzcI5hs4gEHFg (Prototype #3, int pair)
// https://quick-bench.com/q/xiO8ZaBEkYRYUA9dFrMuPLlW9fo (Full impl, int pair)
// AppendOnlyList has 2x lower emplace overhead compared to more generic STL
// containers.
//
// The optimal value of `ChunkSize` will vary by use case, but testing shows
// that a value of 1024 does a good job amortizing the `malloc` cost of growth.
// Performance drops off for larger values, so testing on a case-by-case basis
// is recommended if performance is absolutely critical.
template <
typename T,
size_t ChunkSize,
template <typename U, size_t N> class block_t = std::array>
class AppendOnlyList {
public:
using array_t = block_t<T, ChunkSize>;
static_assert(
std::is_base_of_v<std::array<T, ChunkSize>, array_t>,
"AppendOnlyList expects raw low level pointer storage.");
static_assert(ChunkSize > 0, "Block cannot be empty.");
AppendOnlyList() : buffer_last_{buffer_.before_begin()} {}
AppendOnlyList(const AppendOnlyList&) = delete;
AppendOnlyList(AppendOnlyList&&) = delete;
AppendOnlyList& operator=(const AppendOnlyList&) = delete;
AppendOnlyList& operator=(AppendOnlyList&&) = delete;
~AppendOnlyList() = default;
size_t size() const {
return n_blocks_ * ChunkSize - (size_t)(end_ - next_);
}
template <class... Args>
T* emplace_back(Args&&... args) {
maybe_grow();
if constexpr (
std::is_trivially_destructible_v<T> &&
std::is_trivially_destructible_v<array_t>) {
::new ((void*)next_) T{std::forward<Args>(args)...};
} else {
*next_ = T{std::forward<Args>(args)...};
}
return next_++;
}
template <typename T0>
std::enable_if_t<std::is_same_v<T0, T> && std::is_trivially_copyable_v<T>>
copy(c10::ArrayRef<T0> src) {
size_t n = src.size();
if (C10_UNLIKELY(n == 0)) {
return;
}
maybe_grow();
if (C10_LIKELY(next_ && (next_ + n <= end_))) {
std::memcpy((void*)next_, (void*)src.begin(), n * sizeof(T0));
next_ += n;
} else {
// We could chunk this into several `memcpy`s, but because we expect this
// fallback to be infrequent (n << ChunkSize) the performance impact is
// negligible.
for (auto i : src) {
emplace_back(i);
}
}
}
void clear() {
buffer_.clear();
buffer_last_ = buffer_.before_begin();
n_blocks_ = 0;
next_ = nullptr;
end_ = nullptr;
}
struct Iterator {
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = T;
using pointer = T*;
using reference = T&;
Iterator(std::forward_list<array_t>& buffer, const size_t size)
: block_{buffer.begin()}, size_{size} {}
// End iterator.
Iterator() = default;
bool exhausted() const {
return current_ >= size_;
}
reference operator*() const {
return *current_ptr(/*checked=*/true);
}
pointer operator->() {
return current_ptr(/*checked=*/true);
}
// Prefix increment
Iterator& operator++() {
if (!(++current_ % ChunkSize)) {
block_++;
}
return *this;
}
// Postfix increment
Iterator operator++(int) {
Iterator tmp = *this;
++(*this);
return tmp;
}
friend bool operator==(const Iterator& a, const Iterator& b) {
return a.current_ptr() == b.current_ptr();
}
friend bool operator!=(const Iterator& a, const Iterator& b) {
return a.current_ptr() != b.current_ptr();
}
std::pair<array_t*, size_t> address() const {
if (current_ >= size_) {
return {nullptr, 0};
}
return {&(*block_), current_ % ChunkSize};
}
private:
T* current_ptr(bool checked = false) const {
auto a = address();
if (a.first == nullptr) {
TORCH_INTERNAL_ASSERT(!checked, "Invalid access on AppendOnlyList.");
return nullptr;
}
return a.first->data() + a.second;
}
typename std::forward_list<array_t>::iterator block_;
size_t current_{0};
size_t size_{0};
};
Iterator begin() {
return Iterator(buffer_, size());
}
Iterator end() {
return Iterator();
}
// TODO: cbegin and cend()
private:
void maybe_grow() {
if (C10_UNLIKELY(next_ == end_)) {
buffer_last_ = buffer_.emplace_after(buffer_last_);
n_blocks_++;
next_ = buffer_last_->data();
end_ = next_ + ChunkSize;
}
}
std::forward_list<array_t> buffer_;
// We maintain a pointer to the last element of `buffer_` so that we can
// insert at the end in O(1) time.
size_t n_blocks_{0};
T* next_{nullptr};
T* end_{nullptr};
protected:
typename std::forward_list<array_t>::iterator buffer_last_;
};
} // namespace torch::profiler::impl
```
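A small usage sketch of `AppendOnlyList` as declared above, showing the chunked append and forward iteration; the chunk size of 1024 follows the guidance in the header comment, and the header is assumed to be on the include path.
```cpp
#include <cstdint>
#include <iostream>
#include <torch/csrc/profiler/containers.h>

int main() {
  // 1024-element chunks amortize the allocation cost of growth, as noted above.
  torch::profiler::impl::AppendOnlyList<int64_t, 1024> events;

  for (int64_t i = 0; i < 3000; ++i) {
    events.emplace_back(i); // a new chunk is allocated only every 1024 appends
  }
  std::cout << "size: " << events.size() << "\n"; // 3000

  // Forward iteration walks the chunks in insertion order.
  int64_t sum = 0;
  for (const int64_t v : events) {
    sum += v;
  }
  std::cout << "sum: " << sum << "\n";
}
```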
|
=====================================================================================================================================
SOURCE CODE FILE: data_flow.h
LINES: 1
SIZE: 3.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\data_flow.h
ENCODING: utf-8
```h
#pragma once
#include <memory>
#include <ATen/core/TensorBody.h>
#include <c10/core/TensorImpl.h>
#include <c10/macros/Macros.h>
#include <c10/util/strong_type.h>
namespace torch::profiler::impl {
// Identity is a complex concept in PyTorch. A Tensor might not have an
// associated storage, multiple Tensors might share the same underlying
// storage, the storage of a Tensor might change over time, etc.
//
// For the purpose of profiling we're mostly interested in data flow
// analysis. As a result, we can take an expansive view of identity:
// Tensors share an ID if they share a TensorImpl or storage data.
//
// This identity equality is transitive; if Tensors T0 and T1 share a storage
// S0 and T1 later points to a different storage S1 then all Tensors which
// point to either S0 or S1 are considered to have the same identity. (Since
// profiler cannot reason beyond that.)
//
// The profiler will handle lifetime analysis to ensure that identities do
// not run afoul of the ABA problem. This does, however, mean that identities
// can only be assigned when memory profiling is enabled.
using TensorID = strong::type<size_t, struct TensorID_, strong::regular>;
// Uniquely identifies an allocation. (Generally a StorageImpl's data ptr.)
using AllocationID = strong::type<
size_t,
struct StorageID_,
strong::ordered,
strong::regular,
strong::hashable>;
// We use a Tensor's TensorImpl address and StorageImpl data start to build the
// data flow graph. We do not hold an owning reference so we wrap them in strong
// types to prevent direct access.
using TensorImplAddress = strong::type<
const c10::TensorImpl*,
struct TensorImplAddress_,
strong::regular,
strong::hashable,
strong::boolean>;
using StorageImplData = strong::type<
const void*,
struct StorageImplData_,
strong::regular,
strong::hashable,
strong::boolean>;
// ============================================================================
// == weak_intrusive_ptr and the ABA problem for TensorImpl* ==================
// ============================================================================
// Tracking `TensorImpl`s is an important part of identity tracking, because
// a Tensor might change storage; however when it does we want to retain the
// fact that the old and new storage belong to the same logical Tensor. We
// cannot take an owning reference to the Tensor because that would change
// program semantics by extending the lifetime of the Tensor. However if we
// store a raw TensorImpl* pointer the TensorImpl might be deleted and a new
// TensorImpl might be created that reuses the address. (ABA problem)
//
// Fortunately, there is a feature of `c10::intrusive_ptr` that we can use to
// prevent address reuse for the duration of profiling: the weak intrusive ptr.
// When a Tensor's refcount reaches zero but there are outstanding weak
// references (`weakcount_ > 0`) it will free the underlying managed resources
// by calling `target_->release_resources()`, but it will not call `delete`.
// (Instead, `delete` is called when the last weak reference is destroyed.)
// This means that we can safely use address identity to track `TensorImpls`.
class WeakTensor {
public:
explicit WeakTensor(const at::Tensor& t) : weak_self_(t.getIntrusivePtr()) {}
auto get() const {
return TensorImplAddress{weak_self_._unsafe_get_target()};
}
private:
c10::weak_intrusive_ptr<c10::TensorImpl> weak_self_;
};
struct Result;
void calculateUniqueTensorIDs(
std::vector<std::shared_ptr<Result>>& sorted_results);
} // namespace torch::profiler::impl
```
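A short sketch of how the identity primitives above behave: two handles to the same tensor share a `TensorImpl` (and hence a `TensorImplAddress`), while a reshape produces a new impl over the same storage. This assumes ATen is available and that these internal headers are on the include path.
```cpp
#include <iostream>
#include <ATen/ATen.h>
#include <torch/csrc/profiler/data_flow.h>

int main() {
  at::Tensor a = at::zeros({4});
  at::Tensor b = a;                 // same TensorImpl (shared handle)
  at::Tensor c = a.reshape({2, 2}); // new TensorImpl, same underlying storage

  using torch::profiler::impl::WeakTensor;
  WeakTensor wa(a), wb(b), wc(c);

  // The strong-typed address can be compared or used as a hash key without
  // owning the tensor; the weak reference keeps the address from being reused.
  std::cout << std::boolalpha
            << (wa.get() == wb.get()) << "\n"  // true: same TensorImpl
            << (wa.get() == wc.get()) << "\n"; // false: reshape made a new impl
}
```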
|
==================================================================================================================================
SOURCE CODE FILE: events.h
LINES: 1
SIZE: 1.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\events.h
ENCODING: utf-8
```h
#pragma once
#include <array>
#include <cstdint>
#include <cstring>
#include <vector>
namespace torch::profiler {
/* A vector type to hold a list of performance counters */
using perf_counters_t = std::vector<uint64_t>;
/* Standard list of performance events independent of hardware or backend */
constexpr std::array<const char*, 2> ProfilerPerfEvents = {
/*
* Number of Processing Element (PE) cycles between two points of interest
* in time. This should correlate positively with wall-time. Measured in
* uint64_t. PE can be non-CPU. TBD: reporting behavior for multiple PEs
* participating (i.e. threadpool).
*/
"cycles",
/* Number of PE instructions between two points of interest in time. This
* should correlate positively with wall time and the amount of computation
* (i.e. work). Across repeat executions, the number of instructions should
* be more or less invariant. Measured in uint64_t. PE can be non-CPU.
*/
"instructions"};
} // namespace torch::profiler
```
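For illustration, the standard event list above can be consumed directly; a trivial sketch:
```cpp
#include <iostream>
#include <torch/csrc/profiler/events.h>

int main() {
  // The two backend-independent counters declared above: "cycles" and "instructions".
  for (const char* name : torch::profiler::ProfilerPerfEvents) {
    std::cout << name << "\n";
  }
}
```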
|
===================================================================================================================================================
SOURCE CODE FILE: kineto_client_interface.h
LINES: 1
SIZE: 0.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\kineto_client_interface.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/runtime/interpreter.h>
#include <torch/csrc/profiler/unwind/unwind.h>
namespace torch {
// declare global_kineto_init for libtorch_cpu.so to call
TORCH_API void global_kineto_init();
} // namespace torch
```
|
=======================================================================================================================================
SOURCE CODE FILE: kineto_shim.h
LINES: 1
SIZE: 3.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\kineto_shim.h
ENCODING: utf-8
```h
#pragma once
#include <memory>
#include <string>
// Skip the Kineto dependency on mobile unless explicitly asked for.
// When is it explicitly asked for?
// KinetoEdgeCPUProfiler uses KinetoProfiler for CPU
// event profiling. This has a dependency on the CPU-only libkineto.
#if defined(USE_KINETO) && defined(C10_MOBILE) && \
!defined(EDGE_PROFILER_USE_KINETO)
#undef USE_KINETO
#endif
#include <ActivityType.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/profiler/api.h>
#ifdef USE_KINETO
// Forward declarations so we don't have to include `libkineto.h` in a header.
namespace libkineto {
class GenericTraceActivity;
struct CpuTraceBuffer;
class ActivityTraceInterface;
} // namespace libkineto
#endif
namespace torch {
namespace profiler {
#ifdef USE_KINETO
constexpr bool kKinetoAvailable{true};
#else
constexpr bool kKinetoAvailable{false};
#endif
namespace impl::kineto {
// ----------------------------------------------------------------------------
// -- Interface (Does not require Kineto) -------------------------------------
// ----------------------------------------------------------------------------
struct DeviceAndResource {
int32_t device;
int32_t resource;
};
const DeviceAndResource kineto_ids();
#ifdef USE_KINETO
using trace_t = libkineto::CpuTraceBuffer;
using interface_trace_t = libkineto::ActivityTraceInterface;
using activity_t = libkineto::GenericTraceActivity;
#else
struct DummyTraceBuffer {};
struct DummyTraceInterface {};
using trace_t = DummyTraceBuffer;
using interface_trace_t = DummyTraceBuffer;
struct activity_t;
#endif // USE_KINETO
void addMetadata(
activity_t* activity,
const std::string& key,
const std::string& value);
// Wraps: libkineto::CpuTraceBuffer
struct TraceWrapper {
TraceWrapper(const int64_t start_time, const std::string& name);
// The caller is expected to hold a mutex when calling `addCPUActivity`.
activity_t* addCPUActivity(
const std::string& name,
const libkineto::ActivityType type,
const DeviceAndResource device_and_resource,
const uint64_t correlation_id,
const int64_t start_time,
const int64_t end_time);
void transferCpuTrace(int64_t end_time);
explicit operator bool() const;
std::unique_ptr<trace_t>& get() {
return cpu_trace_;
}
private:
std::unique_ptr<trace_t> cpu_trace_;
};
// Wraps libkineto::ActivityTraceInterface
struct ActivityTraceWrapper {
explicit ActivityTraceWrapper(std::unique_ptr<interface_trace_t>&& trace);
ActivityTraceWrapper() = default;
explicit operator bool() const;
void save(const std::string& path);
const std::unique_ptr<interface_trace_t>& get() {
return trace_;
}
private:
std::unique_ptr<interface_trace_t> trace_;
#ifdef USE_KINETO
bool saved_ = false; // Kineto's save is destructive
#endif
};
using ActivitySet = std::set<torch::autograd::profiler::ActivityType>;
void prepareTrace(
const bool cpuOnly,
const ActivitySet& activities,
const torch::profiler::impl::ExperimentalConfig& config,
const std::string& trace_id = "");
void toggleCollectionDynamic(const bool enable);
void startTrace();
ActivityTraceWrapper stopTrace();
void pushCorrelationId(uint64_t correlation_id);
void pushUserCorrelationId(uint64_t correlation_id);
void popCorrelationId();
void popUserCorrelationId();
void recordThreadInfo();
bool collectivesProfilerExists();
void logInvariantViolation(
const std::string& assertion,
const std::string& error,
const std::string& profile_id,
const std::string& group_profile_id);
} // namespace impl::kineto
} // namespace profiler
namespace autograd::profiler {
c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type);
TORCH_API void addMetadataJson(
const std::string& key,
const std::string& value);
TORCH_API void profilerStep();
} // namespace autograd::profiler
} // namespace torch
```
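A high-level sketch of the tracing flow exposed above (prepare → start → stop → save). It assumes `torch::autograd::profiler::ActivityType` resolves to `torch::profiler::impl::ActivityType` (as the `ActivitySet` alias suggests) and that a default-constructed `ExperimentalConfig` is acceptable; the output path and workload are placeholders.
```cpp
#include <torch/csrc/profiler/kineto_shim.h>
#include <torch/csrc/profiler/orchestration/observer.h>

// Sketch only: a CPU-only capture driven through the shim's free functions.
void captureCpuTrace(const std::string& out_path) {
  namespace kshim = torch::profiler::impl::kineto;

  // Assumption: the autograd alias re-exports impl::ActivityType (see ActivitySet).
  kshim::ActivitySet activities{torch::autograd::profiler::ActivityType::CPU};
  torch::profiler::impl::ExperimentalConfig config; // defaults

  kshim::prepareTrace(/*cpuOnly=*/true, activities, config);
  kshim::startTrace();

  // ... run the workload to be profiled ...

  kshim::ActivityTraceWrapper trace = kshim::stopTrace();
  if (trace) {
    trace.save(out_path); // note: Kineto's save is destructive (see saved_ above)
  }
}
```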
|
==================================================================================================================================================
SOURCE CODE FILE: observer.h
LINES: 1
SIZE: 6.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\orchestration\observer.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/record_function.h>
#include <torch/csrc/Export.h>
#include <utility>
namespace torch::profiler::impl {
// ----------------------------------------------------------------------------
// -- Profiler Config ---------------------------------------------------------
// ----------------------------------------------------------------------------
enum class C10_API_ENUM ActivityType {
CPU = 0,
XPU, // XPU kernels, runtime
CUDA, // CUDA kernels, runtime
HPU, // HPU kernels, runtime
MTIA, // MTIA kernels, runtime
PrivateUse1, // PrivateUse1 kernels, runtime
NUM_KINETO_ACTIVITIES, // must be the last one
};
inline std::string actToString(ActivityType t) {
const std::array<
std::string,
static_cast<size_t>(ActivityType::NUM_KINETO_ACTIVITIES)>
ActivityTypeNames = {"CPU", "XPU", "CUDA", "HPU", "MTIA", "PrivateUse1"};
return ActivityTypeNames[static_cast<int>(t)];
}
enum class C10_API_ENUM ProfilerState {
Disabled = 0,
CPU, // CPU-only profiling
CUDA, // CPU + CUDA events
NVTX, // only emit NVTX markers
ITT, // only emit ITT markers
PRIVATEUSE1, // only emit PRIVATEUSE1 markers
KINETO, // use libkineto
KINETO_GPU_FALLBACK, // use CUDA events when CUPTI is not available
KINETO_PRIVATEUSE1_FALLBACK, // use PrivateUse1 events
KINETO_ONDEMAND, // run the profiler in on-demand mode
NUM_PROFILER_STATES, // must be the last one
};
enum class C10_API_ENUM ActiveProfilerType {
NONE = 0,
LEGACY,
KINETO,
NVTX,
ITT,
PRIVATEUSE1
};
struct TORCH_API ExperimentalConfig {
ExperimentalConfig(
std::vector<std::string> profiler_metrics = {},
bool profiler_measure_per_kernel = false,
bool verbose = false,
std::vector<std::string> performance_events = {},
bool enable_cuda_sync_events = false,
bool adjust_profiler_step = false,
bool disable_external_correlation = false,
bool profile_all_threads = false,
bool capture_overload_names = false,
bool adjust_timestamps = false);
explicit operator bool() const;
std::vector<std::string> profiler_metrics;
bool profiler_measure_per_kernel;
bool verbose;
/*
* List of performance events to be profiled.
* An empty list will disable performance event based profiling altogether.
*/
std::vector<std::string> performance_events;
/*
* For CUDA profiling mode, enable adding CUDA synchronization events
* that expose CUDA device, stream and event synchronization activities.
* This feature is new and currently disabled by default.
*/
bool enable_cuda_sync_events;
/*
* Controls whether or not timestamp adjustment for ProfilerStep and parent
* Python events occurs after profiling. This occurs at an O(n) cost and
* affects only the start of profiler step events.
*/
bool adjust_profiler_step;
/*
* Controls whether or not external correlation is disabled. This is used to
* lower the amount of events received by CUPTI as correlation events are
* paired with runtime/gpu events for each kind of correlation
*/
bool disable_external_correlation;
/* controls whether profiler records cpu events on threads
* that are not spawned from the main thread on which the
* profiler was enabled, similar to on_demand mode */
bool profile_all_threads;
/* controls whether overload names are queried from an ATen
* function schema and stored in the profile */
bool capture_overload_names;
/*
* Controls whether or not timestamp adjustment occurs after profiling.
* The purpose of this is to adjust Vulkan event timelines to align with those
* of their parent CPU events.
* This sometimes requires increasing CPU event durations (to fully contain
* their child events) and delaying CPU event start times (to
* prevent overlaps), so this should not be used unless Vulkan events are
* being profiled and it is ok to use this modified timestamp/duration
* information instead of the original information.
*/
bool adjust_timestamps;
};
struct TORCH_API ProfilerConfig {
explicit ProfilerConfig(
ProfilerState state,
bool report_input_shapes = false,
bool profile_memory = false,
bool with_stack = false,
bool with_flops = false,
bool with_modules = false,
ExperimentalConfig experimental_config = ExperimentalConfig(),
std::string trace_id = "");
bool disabled() const;
bool global() const;
bool pushGlobalCallbacks() const;
ProfilerState state;
ExperimentalConfig experimental_config;
bool report_input_shapes;
bool profile_memory;
bool with_stack;
bool with_flops;
bool with_modules;
std::string trace_id;
// For serialization
at::IValue toIValue() const;
static ProfilerConfig fromIValue(const at::IValue& profilerConfigIValue);
};
// ----------------------------------------------------------------------------
// -- Profiler base class -----------------------------------------------------
// ----------------------------------------------------------------------------
struct TORCH_API ProfilerStateBase : public c10::MemoryReportingInfoBase {
explicit ProfilerStateBase(ProfilerConfig config);
ProfilerStateBase(const ProfilerStateBase&) = delete;
ProfilerStateBase(ProfilerStateBase&&) = delete;
ProfilerStateBase& operator=(const ProfilerStateBase&) = delete;
ProfilerStateBase& operator=(ProfilerStateBase&&) = delete;
~ProfilerStateBase() override;
static ProfilerStateBase* get(bool global);
static ProfilerStateBase* get() {
auto* out = get(/*global=*/true);
return out ? out : get(/*global=*/false);
}
static void push(std::shared_ptr<ProfilerStateBase>&& state);
static std::shared_ptr<ProfilerStateBase> pop(bool global);
static std::shared_ptr<ProfilerStateBase> pop() {
auto out = pop(/*global=*/true);
return out ? std::move(out) : pop(/*global=*/false);
}
const ProfilerConfig& config() const {
return config_;
}
void setCallbackHandle(at::CallbackHandle handle);
void removeCallback();
bool memoryProfilingEnabled() const override {
return config_.profile_memory;
}
virtual ActiveProfilerType profilerType() = 0;
protected:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::mutex state_mutex_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
ProfilerConfig config_ = ProfilerConfig(ProfilerState::Disabled);
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
at::CallbackHandle handle_ = 0;
};
// Note: The following are only for the active *thread local* profiler.
TORCH_API bool profilerEnabled();
TORCH_API ActiveProfilerType profilerType();
TORCH_API ProfilerConfig getProfilerConfig();
} // namespace torch::profiler::impl
```
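A brief sketch showing how the configuration types above compose; the flag values and trace id are illustrative only.
```cpp
#include <torch/csrc/profiler/orchestration/observer.h>

// Sketch: build a Kineto-backed config that records shapes, stacks, and memory.
torch::profiler::impl::ProfilerConfig makeExampleConfig() {
  using namespace torch::profiler::impl;

  ExperimentalConfig experimental(
      /*profiler_metrics=*/{},
      /*profiler_measure_per_kernel=*/false,
      /*verbose=*/true);

  ProfilerConfig config(
      ProfilerState::KINETO,
      /*report_input_shapes=*/true,
      /*profile_memory=*/true,
      /*with_stack=*/true,
      /*with_flops=*/false,
      /*with_modules=*/false,
      std::move(experimental),
      /*trace_id=*/"example-trace");

  // The config round-trips through IValue for serialization.
  at::IValue serialized = config.toIValue();
  return ProfilerConfig::fromIValue(serialized);
}
```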
|
=======================================================================================================================================================
SOURCE CODE FILE: python_tracer.h
LINES: 1
SIZE: 1.82 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\orchestration\python_tracer.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <c10/util/ApproximateClock.h>
#include <c10/util/strong_type.h>
#include <torch/csrc/profiler/kineto_shim.h>
#include <torch/csrc/profiler/util.h>
namespace torch::profiler::impl {
class RecordQueue;
struct Result;
namespace python_tracer {
using TraceKey = strong::type<
uint64_t,
struct TraceKey_,
strong::regular,
strong::hashable,
strong::ostreamable>;
struct CompressedEvent {
TraceKey key_;
uint64_t system_tid_{};
kineto::DeviceAndResource kineto_info_{};
c10::time_t enter_t_{};
};
/*
Libtorch does not depend on Python (e.g. cannot #include <Python.h>); however
when we call the profiler from libtorch_python we need the profiler to be able
to ingest the data that we collect from the Python tracer. (`PyEval_SetProfile`)
In order to solve this dependency issue we define a virtual base and a function
to register a getter. The python tracer then implements these functions and
exposes itself by calling `registerTracer` from `torch/csrc/autograd/init.cpp`.
This pattern of registration for faux python dependencies in libtorch is common
in the PyTorch codebase.
*/
struct TORCH_API PythonTracerBase {
static std::unique_ptr<PythonTracerBase> make(RecordQueue* queue);
virtual ~PythonTracerBase() = default;
virtual void stop() = 0;
virtual void restart() = 0;
virtual std::vector<std::shared_ptr<Result>> getEvents(
std::function<c10::time_t(c10::approx_time_t)> time_converter,
std::vector<CompressedEvent>& enters,
c10::time_t end_time_ns) = 0;
};
using MakeFn = std::unique_ptr<PythonTracerBase> (*)(RecordQueue*);
TORCH_API void registerTracer(MakeFn make_tracer);
} // namespace python_tracer
} // namespace torch::profiler::impl
```
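A minimal sketch of the registration pattern described in the comment above: a no-op tracer implementing `PythonTracerBase`, registered via `registerTracer`. In the real code this lives in libtorch_python and hooks `PyEval_SetProfile`; the class and function names here are illustrative.
```cpp
#include <functional>
#include <memory>
#include <vector>
#include <torch/csrc/profiler/orchestration/python_tracer.h>

namespace example {
using namespace torch::profiler::impl::python_tracer;

// A do-nothing tracer; a real implementation would record Python frames.
struct NoopTracer : PythonTracerBase {
  explicit NoopTracer(torch::profiler::impl::RecordQueue* /*queue*/) {}
  void stop() override {}
  void restart() override {}
  std::vector<std::shared_ptr<torch::profiler::impl::Result>> getEvents(
      std::function<c10::time_t(c10::approx_time_t)> /*time_converter*/,
      std::vector<CompressedEvent>& /*enters*/,
      c10::time_t /*end_time_ns*/) override {
    return {};
  }
};

std::unique_ptr<PythonTracerBase> makeNoopTracer(
    torch::profiler::impl::RecordQueue* queue) {
  return std::make_unique<NoopTracer>(queue);
}

// Called once at module init, mirroring the registration done in
// torch/csrc/autograd/init.cpp.
void installNoopTracer() {
  registerTracer(&makeNoopTracer);
}
} // namespace example
```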
|
================================================================================================================================================
SOURCE CODE FILE: vulkan.h
LINES: 1
SIZE: 0.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\orchestration\vulkan.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/profiler/stubs/base.h>
#include <torch/csrc/profiler/util.h>
#include <cstdint>
namespace torch::profiler::impl::vulkan {
// Using a function pointer, i.e. [std::tuple<std::string, uint64_t> (*)(int64_t)],
// doesn't work because we need to capture the QueryPool in the lambda context:
// https://stackoverflow.com/a/28746827
using GetShaderNameAndDurationNsFn =
std::function<std::tuple<std::string, uint64_t>(int64_t)>;
TORCH_API void registerGetShaderNameAndDurationNs(
GetShaderNameAndDurationNsFn get_shader_name_and_duration_ns);
TORCH_API void deregisterGetShaderNameAndDurationNs();
std::tuple<std::string, uint64_t> getShaderNameAndDurationNs(
const vulkan_id_t& vulkan_id);
} // namespace torch::profiler::impl::vulkan
```
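A short sketch of registering the Vulkan shader-name callback declared above; the QueryPool lookup is stubbed out and the returned values are placeholders.
```cpp
#include <cstdint>
#include <string>
#include <tuple>
#include <torch/csrc/profiler/orchestration/vulkan.h>

// Sketch: register a callback that maps a Vulkan query id to (shader name, duration).
void installVulkanShaderInfo() {
  torch::profiler::impl::vulkan::registerGetShaderNameAndDurationNs(
      [](int64_t vulkan_id) -> std::tuple<std::string, uint64_t> {
        // A real implementation captures the QueryPool here and looks up the
        // shader dispatch associated with `vulkan_id`.
        return {"example_shader_" + std::to_string(vulkan_id), /*duration_ns=*/0};
      });
}

void removeVulkanShaderInfo() {
  torch::profiler::impl::vulkan::deregisterGetShaderNameAndDurationNs();
}
```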
|
====================================================================================================================================
SOURCE CODE FILE: perf-inl.h
LINES: 1
SIZE: 1.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\perf-inl.h
ENCODING: utf-8
```h
#pragma once
#if defined(__ANDROID__) || defined(__linux__)
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#endif /* __ANDROID__ || __linux__ */
#include <torch/csrc/profiler/perf.h>
#include <limits>
namespace torch::profiler::impl::linux_perf {
/*
* PerfEvent
* ---------
*/
inline void PerfEvent::Disable() const {
#if defined(__ANDROID__) || defined(__linux__)
ioctl(fd_, PERF_EVENT_IOC_DISABLE, 0);
#endif /* __ANDROID__ || __linux__ */
}
inline void PerfEvent::Enable() const {
#if defined(__ANDROID__) || defined(__linux__)
ioctl(fd_, PERF_EVENT_IOC_ENABLE, 0);
#endif /* __ANDROID__ || __linux__ */
}
inline void PerfEvent::Reset() const {
#if defined(__ANDROID__) || defined(__linux__)
ioctl(fd_, PERF_EVENT_IOC_RESET, 0);
#endif /* __ANDROID__ || __linux__ */
}
/*
* PerfProfiler
* ------------
*/
inline uint64_t PerfProfiler::CalcDelta(uint64_t start, uint64_t end) const {
if (end < start) { // overflow
return end + (std::numeric_limits<uint64_t>::max() - start);
}
// not possible to wrap around start for a 64b cycle counter
return end - start;
}
inline void PerfProfiler::StartCounting() const {
for (auto& e : events_) {
e.Enable();
}
}
inline void PerfProfiler::StopCounting() const {
for (auto& e : events_) {
e.Disable();
}
}
} // namespace torch::profiler::impl::linux_perf
```
|
================================================================================================================================
SOURCE CODE FILE: perf.h
LINES: 1
SIZE: 2.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\perf.h
ENCODING: utf-8
```h
#pragma once
#include <array>
#include <cstdint>
#include <memory>
#include <stack>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <torch/csrc/profiler/events.h>
#include <c10/util/Exception.h>
namespace torch::profiler::impl::linux_perf {
/*
* Maximum number of events supported
* This stems from the hardware limitation on CPU performance counters, and the
* fact that we don't support time multiplexing just yet.
* Time multiplexing involves scaling the counter values proportional to
* the enabled and running time or running the workload multiple times.
*/
constexpr uint8_t MAX_EVENTS = 4;
struct PerfCounter {
uint64_t value; /* The value of the event */
uint64_t time_enabled; /* for TIME_ENABLED */
uint64_t time_running; /* for TIME_RUNNING */
};
/*
* Basic perf event handler for Android and Linux
*/
class PerfEvent {
public:
explicit PerfEvent(std::string& name) : name_(name) {}
PerfEvent(const PerfEvent& other) = delete;
PerfEvent& operator=(const PerfEvent&) = delete;
PerfEvent& operator=(PerfEvent&& other) noexcept {
if (this != &other) {
fd_ = other.fd_;
other.fd_ = -1;
name_ = std::move(other.name_);
}
return *this;
}
PerfEvent(PerfEvent&& other) noexcept {
*this = std::move(other);
}
~PerfEvent();
/* Set up perf events with the Linux kernel; attaches perf to this process
* using perf_event_open(2) */
void Init();
/* Stop incrementing hardware counters for this event */
void Disable() const;
/* Start counting hardware event from this point on */
void Enable() const;
/* Zero out the counts for this event */
void Reset() const;
/* Returns PerfCounter values for this event from the kernel; on unsupported
* platforms this always returns zero */
uint64_t ReadCounter() const;
private:
/* Name of the event */
std::string name_;
int fd_ = -1;
};
class PerfProfiler {
public:
/* Configure all the events and track them as individual PerfEvent */
void Configure(std::vector<std::string>& event_names);
/* Enable events counting from here */
void Enable();
/* Disable counting and fill in the caller supplied container with delta
* calculated from the start count values since last Enable() */
void Disable(perf_counters_t&);
private:
uint64_t CalcDelta(uint64_t start, uint64_t end) const;
void StartCounting() const;
void StopCounting() const;
std::vector<PerfEvent> events_;
std::stack<perf_counters_t> start_values_;
};
} // namespace torch::profiler::impl::linux_perf
```
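A usage sketch of `PerfProfiler` based on the public interface above: configure the standard events, enable counting around a region, and read back the deltas. Only meaningful on Linux/Android; elsewhere the counters read as zero. Whether `Disable` resizes the output vector is not shown in this header, so it is pre-sized here as an assumption.
```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>
#include <torch/csrc/profiler/events.h>
#include <torch/csrc/profiler/perf.h>

int main() {
  using namespace torch::profiler;

  impl::linux_perf::PerfProfiler profiler;
  std::vector<std::string> event_names = {"cycles", "instructions"};
  profiler.Configure(event_names);

  perf_counters_t counters(event_names.size(), 0);

  profiler.Enable();
  // ... region of interest ...
  volatile uint64_t acc = 0;
  for (uint64_t i = 0; i < 1000000; ++i) {
    acc += i;
  }
  profiler.Disable(counters); // fills in deltas accumulated since Enable()

  for (size_t i = 0; i < event_names.size(); ++i) {
    std::cout << event_names[i] << ": " << counters[i] << "\n";
  }
}
```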
|
=====================================================================================================================================================
SOURCE CODE FILE: combined_traceback.h
LINES: 1
SIZE: 0.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\python\combined_traceback.h
ENCODING: utf-8
```h
#include <torch/csrc/profiler/combined_traceback.h>
#include <pybind11/pybind11.h>
#include <torch/csrc/utils/pybind.h>
namespace torch {
// Symbolize combined traceback objects, converting them into lists of
// dictionaries that are easily consumed in Python.
// Returns std::vector because one use is to call it with a batch of
// tracebacks that come from a larger data structure (e.g. a memory snapshot)
// and then have more C++ code to put those objects in the right place.
TORCH_API std::vector<pybind11::object> py_symbolize(
std::vector<CapturedTraceback*>& to_symbolize);
// requires GIL to be held, frees any pending free frames
TORCH_PYTHON_API void freeDeadCapturedTracebackFrames();
TORCH_PYTHON_API void installCapturedTracebackPython();
} // namespace torch
```
|
=======================================================================================================================================
SOURCE CODE FILE: init.h
LINES: 1
SIZE: 1.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\python\init.h
ENCODING: utf-8
```h
#pragma once
#include <Python.h>
#include <torch/csrc/profiler/collection.h>
#include <torch/csrc/profiler/python/pybind.h>
namespace pybind11::detail {
using torch::profiler::impl::TensorID;
#define STRONG_POINTER_TYPE_CASTER(T) \
template <> \
struct type_caster<T> : public strong_pointer_type_caster<T> {};
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::StorageImplData)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::AllocationID)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::TensorImplAddress)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleSelf)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleCls)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyOptimizerSelf)
#undef STRONG_POINTER_TYPE_CASTER
template <>
struct type_caster<TensorID> : public strong_uint_type_caster<TensorID> {};
} // namespace pybind11::detail
namespace torch::profiler {
void initPythonBindings(PyObject* module);
} // namespace torch::profiler
```
|
=========================================================================================================================================
SOURCE CODE FILE: pybind.h
LINES: 1
SIZE: 1.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\python\pybind.h
ENCODING: utf-8
```h
#pragma once
#include <pybind11/pybind11.h>
#include <c10/util/strong_type.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/utils/python_numbers.h>
namespace pybind11::detail {
// Strong typedefs don't make much sense in Python since everything is duck
// typed. So instead we simply extract the underlying value and let the caller
// handle correctness.
template <typename T>
struct strong_pointer_type_caster {
template <typename T_>
static handle cast(
const T_& src,
return_value_policy /*policy*/,
handle /*parent*/) {
const auto* ptr = reinterpret_cast<const void*>(src.value_of());
return ptr ? handle(THPUtils_packUInt64(reinterpret_cast<intptr_t>(ptr)))
: none();
}
bool load(handle /*src*/, bool /*convert*/) {
return false;
}
PYBIND11_TYPE_CASTER(T, _("strong_pointer"));
};
template <typename T>
struct strong_uint_type_caster {
template <typename T_>
static handle cast(
const T_& src,
return_value_policy /*policy*/,
handle /*parent*/) {
return handle(THPUtils_packUInt64(src.value_of()));
}
bool load(handle /*src*/, bool /*convert*/) {
return false;
}
PYBIND11_TYPE_CASTER(T, _("strong_uint"));
};
} // namespace pybind11::detail
```
|
===============================================================================================================================================================
SOURCE CODE FILE: execution_trace_observer.h
LINES: 1
SIZE: 0.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\standalone\execution_trace_observer.h
ENCODING: utf-8
```h
#pragma once
#include <c10/macros/Export.h>
#include <string>
namespace torch::profiler::impl {
// Adds the execution trace observer as a global callback function; the data
// will be written to the output file path.
TORCH_API bool addExecutionTraceObserver(const std::string& output_file_path);
// Remove the execution trace observer from the global callback functions.
TORCH_API void removeExecutionTraceObserver();
// Enables execution trace observer.
TORCH_API void enableExecutionTraceObserver();
// Disables execution trace observer.
TORCH_API void disableExecutionTraceObserver();
} // namespace torch::profiler::impl
```
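A sketch of the intended call sequence for the observer API above; the output path is illustrative.
```cpp
#include <string>
#include <torch/csrc/profiler/standalone/execution_trace_observer.h>

// Sketch: register the observer, capture a region, then tear it down.
void captureExecutionTrace() {
  const std::string out_path = "/tmp/execution_trace.json"; // illustrative path
  if (!torch::profiler::impl::addExecutionTraceObserver(out_path)) {
    return; // registration failed
  }
  torch::profiler::impl::enableExecutionTraceObserver();
  // ... run the model / workload to be recorded ...
  torch::profiler::impl::disableExecutionTraceObserver();
  torch::profiler::impl::removeExecutionTraceObserver();
}
```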
|
===================================================================================================================================================
SOURCE CODE FILE: itt_observer.h
LINES: 1
SIZE: 0.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\standalone\itt_observer.h
ENCODING: utf-8
```h
#include <torch/csrc/profiler/api.h>
namespace torch::profiler::impl {
void pushITTCallbacks(
const ProfilerConfig& config,
const std::unordered_set<at::RecordScope>& scopes);
} // namespace torch::profiler::impl
```
|
====================================================================================================================================================
SOURCE CODE FILE: nvtx_observer.h
LINES: 1
SIZE: 0.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\standalone\nvtx_observer.h
ENCODING: utf-8
```h
#include <torch/csrc/profiler/api.h>
namespace torch::profiler::impl {
void pushNVTXCallbacks(
const ProfilerConfig& config,
const std::unordered_set<at::RecordScope>& scopes);
} // namespace torch::profiler::impl
```
|
===========================================================================================================================================================
SOURCE CODE FILE: privateuse1_observer.h
LINES: 1
SIZE: 1.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\standalone\privateuse1_observer.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/profiler/api.h>
namespace torch::profiler::impl {
using CallBackFnPtr = void (*)(
const ProfilerConfig& config,
const std::unordered_set<at::RecordScope>& scopes);
struct PushPRIVATEUSE1CallbacksStub {
PushPRIVATEUSE1CallbacksStub() = default;
PushPRIVATEUSE1CallbacksStub(const PushPRIVATEUSE1CallbacksStub&) = delete;
PushPRIVATEUSE1CallbacksStub& operator=(const PushPRIVATEUSE1CallbacksStub&) =
delete;
PushPRIVATEUSE1CallbacksStub(PushPRIVATEUSE1CallbacksStub&&) = default;
PushPRIVATEUSE1CallbacksStub& operator=(PushPRIVATEUSE1CallbacksStub&&) =
default;
~PushPRIVATEUSE1CallbacksStub() = default;
template <typename... ArgTypes>
void operator()(ArgTypes&&... args) {
return (*push_privateuse1_callbacks_fn)(std::forward<ArgTypes>(args)...);
}
void set_privateuse1_dispatch_ptr(CallBackFnPtr fn_ptr) {
push_privateuse1_callbacks_fn = fn_ptr;
}
private:
CallBackFnPtr push_privateuse1_callbacks_fn = nullptr;
};
extern TORCH_API struct PushPRIVATEUSE1CallbacksStub
pushPRIVATEUSE1CallbacksStub;
struct RegisterPRIVATEUSE1Observer {
RegisterPRIVATEUSE1Observer(
PushPRIVATEUSE1CallbacksStub& stub,
CallBackFnPtr value) {
stub.set_privateuse1_dispatch_ptr(value);
}
};
#define REGISTER_PRIVATEUSE1_OBSERVER(name, fn) \
static RegisterPRIVATEUSE1Observer name##__register(name, fn);
} // namespace torch::profiler::impl
```
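A sketch of how a PrivateUse1 backend could hook into the stub above via `REGISTER_PRIVATEUSE1_OBSERVER`; `my_backend::pushMyBackendCallbacks` is a placeholder, and the macro is expanded inside `torch::profiler::impl` so that the unqualified names in its expansion resolve.
```cpp
#include <torch/csrc/profiler/standalone/privateuse1_observer.h>

namespace my_backend {

// Placeholder callback matching CallBackFnPtr; a real backend would install
// its RecordFunction callbacks here.
void pushMyBackendCallbacks(
    const torch::profiler::impl::ProfilerConfig& /*config*/,
    const std::unordered_set<at::RecordScope>& /*scopes*/) {}

} // namespace my_backend

namespace torch::profiler::impl {
// Static registration: stores the function pointer in the global stub so the
// profiler can dispatch to it when the PrivateUse1 backend is active.
REGISTER_PRIVATEUSE1_OBSERVER(
    pushPRIVATEUSE1CallbacksStub,
    &my_backend::pushMyBackendCallbacks)
} // namespace torch::profiler::impl
```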
|
======================================================================================================================================
SOURCE CODE FILE: base.h
LINES: 1
SIZE: 1.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\stubs\base.h
ENCODING: utf-8
```h
#pragma once
#include <functional>
#include <memory>
#include <c10/core/Device.h>
#include <c10/util/strong_type.h>
#include <torch/csrc/Export.h>
struct CUevent_st;
namespace torch::profiler::impl {
// ----------------------------------------------------------------------------
// -- Annotation --------------------------------------------------------------
// ----------------------------------------------------------------------------
using ProfilerEventStub = std::shared_ptr<CUevent_st>;
using ProfilerVoidEventStub = std::shared_ptr<void>;
struct TORCH_API ProfilerStubs {
virtual void record(
c10::DeviceIndex* device,
ProfilerVoidEventStub* event,
int64_t* cpu_ns) const = 0;
virtual float elapsed(
const ProfilerVoidEventStub* event,
const ProfilerVoidEventStub* event2) const = 0;
virtual void mark(const char* name) const = 0;
virtual void rangePush(const char* name) const = 0;
virtual void rangePop() const = 0;
virtual bool enabled() const {
return false;
}
virtual void onEachDevice(std::function<void(int)> op) const = 0;
virtual void synchronize() const = 0;
virtual ~ProfilerStubs() = default;
};
TORCH_API void registerCUDAMethods(ProfilerStubs* stubs);
TORCH_API const ProfilerStubs* cudaStubs();
TORCH_API void registerITTMethods(ProfilerStubs* stubs);
TORCH_API const ProfilerStubs* ittStubs();
TORCH_API void registerPrivateUse1Methods(ProfilerStubs* stubs);
TORCH_API const ProfilerStubs* privateuse1Stubs();
using vulkan_id_t = strong::type<
int64_t,
struct _VulkanID,
strong::regular,
strong::convertible_to<int64_t>,
strong::hashable>;
} // namespace torch::profiler::impl
```
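A sketch of a minimal `ProfilerStubs` implementation registered for the PrivateUse1 backend; every override is a no-op and the class name is illustrative.
```cpp
#include <torch/csrc/profiler/stubs/base.h>

namespace example {
using torch::profiler::impl::ProfilerStubs;
using torch::profiler::impl::ProfilerVoidEventStub;

// A do-nothing backend: all hooks are no-ops and elapsed time is always zero.
struct NullStubs : ProfilerStubs {
  void record(
      c10::DeviceIndex* /*device*/,
      ProfilerVoidEventStub* /*event*/,
      int64_t* /*cpu_ns*/) const override {}
  float elapsed(
      const ProfilerVoidEventStub* /*event*/,
      const ProfilerVoidEventStub* /*event2*/) const override {
    return 0.f;
  }
  void mark(const char* /*name*/) const override {}
  void rangePush(const char* /*name*/) const override {}
  void rangePop() const override {}
  void onEachDevice(std::function<void(int)> /*op*/) const override {}
  void synchronize() const override {}
};

void installNullStubs() {
  static NullStubs stubs; // must outlive the profiler, so give it static storage
  torch::profiler::impl::registerPrivateUse1Methods(&stubs);
}
} // namespace example
```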
|
=========================================================================================================================================
SOURCE CODE FILE: action.h
LINES: 1
SIZE: 1.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\action.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <ostream>
namespace torch::unwind {
enum {
A_UNDEFINED = 0x0,
A_REG_PLUS_DATA = 0x1, // exp = REG[reg] + data0
A_LOAD_CFA_OFFSET = 0x2, // exp = *(cfa + data0)
A_REG_PLUS_DATA_DEREF = 0x3 // exp = *(REG[reg] + data0)
};
// register numbers in dwarf info
enum {
D_UNDEFINED = -1,
D_RBP = 6,
D_RSP = 7,
D_RIP = 16,
D_REG_SIZE = 17,
};
struct Action {
uint8_t kind = A_UNDEFINED;
int32_t reg = -1;
int64_t data = 0;
static Action undefined() {
return Action{A_UNDEFINED};
}
static Action regPlusData(int32_t reg, int64_t offset) {
return Action{A_REG_PLUS_DATA, reg, offset};
}
static Action regPlusDataDeref(int32_t reg, int64_t offset) {
return Action{A_REG_PLUS_DATA_DEREF, reg, offset};
}
static Action loadCfaOffset(int64_t offset) {
return Action{A_LOAD_CFA_OFFSET, D_UNDEFINED, offset};
}
friend std::ostream& operator<<(std::ostream& out, const Action& self) {
switch (self.kind) {
case A_UNDEFINED:
out << "u";
break;
case A_REG_PLUS_DATA:
out << "r" << (int)self.reg << " + " << self.data;
break;
case A_REG_PLUS_DATA_DEREF:
out << "*(r" << (int)self.reg << " + " << self.data << ")";
break;
case A_LOAD_CFA_OFFSET:
out << "*(cfa + " << self.data << ")";
break;
}
return out;
}
};
} // namespace torch::unwind
```
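For concreteness, a tiny sketch constructing the unwind actions above and printing them with the provided `operator<<`; `D_RSP`/`D_RBP` come from the DWARF register enum in the same header.
```cpp
#include <iostream>
#include <torch/csrc/profiler/unwind/action.h>

int main() {
  using torch::unwind::Action;

  // CFA = RSP + 16, return address at *(cfa - 8), caller RBP at *(RBP + 0).
  Action cfa = Action::regPlusData(torch::unwind::D_RSP, 16);
  Action rip = Action::loadCfaOffset(-8);
  Action rbp = Action::regPlusDataDeref(torch::unwind::D_RBP, 0);

  // operator<< prints the compact encodings, e.g. "r7 + 16" and "*(cfa + -8)".
  std::cout << cfa << "\n" << rip << "\n" << rbp << "\n";
}
```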
|
==============================================================================================================================================
SOURCE CODE FILE: communicate.h
LINES: 1
SIZE: 2.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\communicate.h
ENCODING: utf-8
```h
#pragma once
#include <ext/stdio_filebuf.h>
#include <torch/csrc/profiler/unwind/unwind_error.h>
#include <unistd.h>
#include <array>
#include <memory>
namespace torch::unwind {
// helper to open a process with stdin/stdout/stderr streams.
struct Communicate {
Communicate(const char* command, const char** args) {
if (pipe(inpipe_.data()) < 0 || pipe(outpipe_.data()) < 0 ||
pipe(errpipe_.data()) < 0) {
throw UnwindError("pipe() failed");
}
pid_t pid = fork();
if (pid < 0) {
throw UnwindError("fork() failed");
} else if (pid == 0) { // child process
close(inpipe_[1]);
close(outpipe_[0]);
close(errpipe_[0]);
dup2(inpipe_[0], STDIN_FILENO);
dup2(outpipe_[1], STDOUT_FILENO);
dup2(errpipe_[1], STDERR_FILENO);
execvp(command, (char* const*)args);
throw UnwindError("failed execvp");
} else { // parent process
close(inpipe_[0]);
close(outpipe_[1]);
close(errpipe_[1]);
outbuf_ = std::make_unique<__gnu_cxx::stdio_filebuf<char>>(
inpipe_[1], std::ios::out);
inbuf_ = std::make_unique<__gnu_cxx::stdio_filebuf<char>>(
outpipe_[0], std::ios::in);
errbuf_ = std::make_unique<__gnu_cxx::stdio_filebuf<char>>(
errpipe_[0], std::ios::in);
in_ = std::make_unique<std::istream>(inbuf_.get());
out_ = std::make_unique<std::ostream>(outbuf_.get());
err_ = std::make_unique<std::ostream>(errbuf_.get());
}
}
Communicate(const Communicate&) = delete;
Communicate(Communicate&&) = delete;
Communicate& operator=(const Communicate&) = delete;
Communicate& operator=(Communicate&&) = delete;
~Communicate() {
close(inpipe_[1]);
close(outpipe_[0]);
close(errpipe_[0]);
}
std::ostream& out() {
return *out_;
}
std::ostream& err() {
return *err_;
}
std::istream& in() {
return *in_;
}
private:
std::array<int, 2> inpipe_{-1, -1};
std::array<int, 2> outpipe_{-1, -1};
std::array<int, 2> errpipe_{-1, -1};
std::unique_ptr<__gnu_cxx::stdio_filebuf<char>> outbuf_, inbuf_, errbuf_;
std::unique_ptr<std::istream> in_;
std::unique_ptr<std::ostream> out_;
std::unique_ptr<std::ostream> err_;
};
} // namespace torch::unwind
```
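A sketch of driving an external process through `Communicate`; the profiler uses this pattern to talk to a symbolizer, but here the command is just `cat` for illustration (assuming it behaves as an unbuffered echo). POSIX-only, like the class itself.
```cpp
#include <iostream>
#include <string>
#include <torch/csrc/profiler/unwind/communicate.h>

int main() {
  // Echo a line through `cat`: out() feeds the child's stdin, in() reads its stdout.
  const char* args[] = {"cat", nullptr};
  torch::unwind::Communicate comm("cat", args);

  comm.out() << "hello from the parent\n" << std::flush;

  std::string line;
  std::getline(comm.in(), line);
  std::cout << "child echoed: " << line << "\n";
}
```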
|
=============================================================================================================================================
SOURCE CODE FILE: debug_info.h
LINES: 16
SIZE: 9.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\debug_info.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/profiler/unwind/dwarf_enums.h>
#include <torch/csrc/profiler/unwind/dwarf_symbolize_enums.h>
#include <torch/csrc/profiler/unwind/lexer.h>
#include <torch/csrc/profiler/unwind/sections.h>
#include <torch/csrc/profiler/unwind/unwind_error.h>
#include <cstdint>
#include <optional>
namespace torch::unwind {
struct DebugInfo {
DebugInfo(Sections& s) : s_(s) {}
void parse(uint64_t offset) {
auto L = parseHeader(offset);
parseCompileUnit(L);
}
std::optional<uint64_t> lineNumberProgramOffset() {
return line_number_program_offset_;
}
uint64_t nextOffset() {
return end_ - s_.debug_info.data;
}
std::vector<std::pair<uint64_t, uint64_t>> ranges() {
if (range_ptr_) {
auto offset = range_ptr_->first;
if (range_ptr_->second == DW_FORM_rnglistx) {
UNWIND_CHECK(rnglists_base_, "rnglistx but not rnglists_base_ set");
LOG_INFO("index for rnglistx {:x} + {:x}\n", *rnglists_base_, offset);
CheckedLexer L = s_.debug_rnglists.lexer(
*rnglists_base_ + offset * sec_offset_size_);
auto read = readSegmentOffset(L);
offset = *rnglists_base_ + read;
}
return version_ == 4 ? readRanges4(offset) : readRanges5(offset);
}
if (!highpc_) {
return {};
}
return {{lowpc_, lowpc_ + *highpc_}};
}
bool is64bit() {
return is_64bit_;
}
private:
CheckedLexer parseHeader(uint64_t offset) {
offset_ = offset;
CheckedLexer L = s_.debug_info.lexer(offset_);
std::tie(length_, is_64bit_) = L.readSectionLength();
sec_offset_size_ = is_64bit_ ? 8 : 4;
end_ = (const char*)L.loc() + length_;
version_ = L.read<uint16_t>();
UNWIND_CHECK(
version_ == 5 || version_ == 4,
"unexpected dwarf version {}",
version_);
uint8_t address_size = 0;
if (version_ == 5) {
auto unit_type = L.read<uint8_t>();
UNWIND_CHECK(unit_type == 0x1, "unexpected unit type {}", unit_type);
address_size = L.read<uint8_t>();
debug_abbrev_offset_ =
is_64bit_ ? L.read<uint64_t>() : L.read<uint32_t>();
} else {
debug_abbrev_offset_ =
is_64bit_ ? L.read<uint64_t>() : L.read<uint32_t>();
address_size = L.read<uint8_t>();
}
LOG_INFO(
"compilation unit at offset {:x} with length {:x} and debug_abbrev_offset {:x}\n",
offset,
length_,
debug_abbrev_offset_);
UNWIND_CHECK(
address_size == 8,
"expected 64-bit dwarf but found address size {}",
address_size);
return L;
}
uint64_t readSegmentOffset(CheckedLexer& L) {
return s_.readSegmentOffset(L, is_64bit_);
}
uint64_t readEncoded(CheckedLexer& L, uint64_t encoding) {
switch (encoding) {
case DW_FORM_data8:
case DW_FORM_addr:
return L.read<uint64_t>();
case DW_FORM_data4:
return L.read<uint32_t>();
case DW_FORM_addrx: {
auto idx = L.readULEB128();
return s_.debug_addr.lexer(address_base_ + sizeof(uint64_t) * idx)
.read<uint64_t>();
}
case DW_FORM_sec_offset:
return readSegmentOffset(L);
case DW_FORM_rnglistx: {
return L.readULEB128();
}
default:
UNWIND_CHECK(false, "unexpected encoding");
}
}
void parseCompileUnit(CheckedLexer& L) {
auto entry = L.readULEB128();
auto A = findAbbrev(debug_abbrev_offset_, entry);
while (true) {
auto attr = A.readULEB128();
auto form = A.readULEB128();
if (attr == 0 && form == 0) {
break;
}
if (form == DW_FORM_implicit_const) {
A.readSLEB128();
}
if (attr == DW_AT_low_pc) {
lowpc_ = readEncoded(L, form);
LOG_INFO(" lowpc {:x}\n", lowpc_);
} else if (attr == DW_AT_high_pc) {
highpc_ = readEncoded(L, form);
range_ptr_ = std::nullopt;
LOG_INFO(" highpc {:x}\n", *highpc_);
} else if (attr == DW_AT_addr_base) {
UNWIND_CHECK(form == DW_FORM_sec_offset, "unexpected addr_base form");
address_base_ = readSegmentOffset(L);
LOG_INFO(" address base {:x}\n", address_base_);
} else if (attr == DW_AT_rnglists_base) {
UNWIND_CHECK(
form == DW_FORM_sec_offset, "unexpected rnglists_base form");
rnglists_base_ = readSegmentOffset(L);
LOG_INFO(" range base {:x}\n", *rnglists_base_);
} else if (form == DW_FORM_string) {
L.readCString();
} else if (attr == DW_AT_stmt_list) {
UNWIND_CHECK(form == DW_FORM_sec_offset, "unexpected stmt_list form");
LOG_INFO(" program table offset {:x}\n", *line_number_program_offset_);
line_number_program_offset_ = readSegmentOffset(L);
} else if (form == DW_FORM_exprloc) {
auto sz = L.readULEB128();
L.skip(int64_t(sz));
} else if (form == DW_FORM_block1) {
auto sz = L.read<uint8_t>();
L.skip(int64_t(sz));
} else if (attr == DW_AT_ranges) {
auto range_offset = readEncoded(L, form);
LOG_INFO("setting range_ptr to {:x} {:x}\n", range_offset, form);
range_ptr_.emplace(range_offset, form);
} else if (
form == DW_FORM_udata || form == DW_FORM_rnglistx ||
form == DW_FORM_strx || form == DW_FORM_loclistx ||
form == DW_FORM_addrx) {
L.readULEB128();
} else if (form == DW_FORM_sdata) {
L.readSLEB128();
} else {
auto sz = formSize(form, sec_offset_size_);
UNWIND_CHECK(sz, "unsupported form in compilation unit {:x}", form);
L.skip(int64_t(*sz));
}
}
}
std::vector<std::pair<uint64_t, uint64_t>> readRanges4(uint64_t offset) {
CheckedLexer L = s_.debug_ranges.lexer(offset);
std::vector<std::pair<uint64_t, uint64_t>> ranges;
uint64_t base = lowpc_;
while (true) {
auto start = L.read<uint64_t>();
auto end = L.read<uint64_t>();
if (start == 0 && end == 0) {
break;
}
if (start == std::numeric_limits<uint64_t>::max()) {
base = end;
} else {
ranges.emplace_back(base + start, base + end);
}
}
return ranges;
}
std::vector<std::pair<uint64_t, uint64_t>> readRanges5(uint64_t offset) {
CheckedLexer L = s_.debug_rnglists.lexer(offset);
uint64_t base = 0;
LOG_INFO("BEGIN RANGES {:x}\n", offset);
std::vector<std::pair<uint64_t, uint64_t>> ranges;
while (true) {
auto op = L.read<uint8_t>();
switch (op) {
case DW_RLE_end_of_list:
LOG_INFO("END RANGES\n");
return ranges;
case DW_RLE_base_addressx: {
base = readEncoded(L, DW_FORM_addrx);
LOG_INFO("BASE ADDRX {:x}\n", base);
} break;
case DW_RLE_startx_length: {
auto s = readEncoded(L, DW_FORM_addrx);
auto e = L.readULEB128();
LOG_INFO("startx_length {:x} {:x}\n", s, e);
ranges.emplace_back(s, s + e);
} break;
case DW_RLE_base_address:
base = L.read<uint64_t>();
LOG_INFO("BASE ADDR {:x}\n", base);
break;
case DW_RLE_offset_pair: {
auto s = L.readULEB128();
auto e = L.readULEB128();
LOG_INFO("offset_pair {:x} {:x}\n", s, e);
ranges.emplace_back(base + s, base + e);
} break;
case DW_RLE_start_length: {
auto s = L.read<uint64_t>();
auto e = L.readULEB128();
LOG_INFO("start_length {:x} {:x}\n", s, e);
ranges.emplace_back(s, s + e);
} break;
default:
UNWIND_CHECK(false, "unknown range op: {}", op);
}
}
}
CheckedLexer findAbbrev(uint64_t offset, uint64_t entry) {
CheckedLexer L = s_.debug_abbrev.lexer(offset);
while (true) {
auto abbrev_code = L.readULEB128();
UNWIND_CHECK(
abbrev_code != 0,
"could not find entry {} at offset {:x}",
entry,
offset);
auto tag = L.readULEB128();
L.read<uint8_t>(); // has children
if (abbrev_code == entry) {
UNWIND_CHECK(
tag == DW_TAG_compile_unit,
"first entry was not a compile unit but {}",
tag);
return L;
}
while (true) {
auto attr = L.readULEB128();
auto form = L.readULEB128();
if (attr == 0 && form == 0) {
break;
}
if (form == DW_FORM_implicit_const) {
L.readSLEB128();
}
}
}
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
Sections& s_;
std::optional<uint64_t> line_number_program_offset_;
uint64_t offset_ = 0;
uint8_t sec_offset_size_ = 0;
uint64_t length_ = 0;
const char* end_ = nullptr;
uint64_t debug_abbrev_offset_ = 0;
bool is_64bit_ = false;
std::optional<std::pair<uint64_t, uint8_t>> range_ptr_;
uint64_t lowpc_ = 0;
std::optional<uint64_t> highpc_;
uint16_t version_ = 0;
uint64_t address_base_ = 0;
std::optional<uint64_t> rnglists_base_;
};
} // namespace torch::unwind
```
|
==============================================================================================================================================
SOURCE CODE FILE: dwarf_enums.h
LINES: 1
SIZE: 1.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\dwarf_enums.h
ENCODING: utf-8
```h
#pragma once
enum {
DW_EH_PE_absptr = 0x00,
DW_EH_PE_omit = 0xff,
/* FDE data encoding. */
DW_EH_PE_uleb128 = 0x01,
DW_EH_PE_udata2 = 0x02,
DW_EH_PE_udata4 = 0x03,
DW_EH_PE_udata8 = 0x04,
DW_EH_PE_sleb128 = 0x09,
DW_EH_PE_sdata2 = 0x0a,
DW_EH_PE_sdata4 = 0x0b,
DW_EH_PE_sdata8 = 0x0c,
DW_EH_PE_signed = 0x08,
/* FDE flags. */
DW_EH_PE_pcrel = 0x10,
DW_EH_PE_textrel = 0x20,
DW_EH_PE_datarel = 0x30,
DW_EH_PE_funcrel = 0x40,
DW_EH_PE_aligned = 0x50,
DW_EH_PE_indirect = 0x80,
};
enum {
DW_CFA_nop = 0x0,
DW_CFA_advance_loc = 0x01,
DW_CFA_offset = 0x02,
DW_CFA_restore = 0x03,
DW_CFA_advance_loc1 = 0x02,
DW_CFA_advance_loc2 = 0x03,
DW_CFA_advance_loc4 = 0x04,
DW_CFA_restore_extended = 0x06,
DW_CFA_undefined = 0x07,
DW_CFA_register = 0x09,
DW_CFA_remember_state = 0x0a,
DW_CFA_restore_state = 0x0b,
DW_CFA_def_cfa = 0x0c,
DW_CFA_def_cfa_register = 0x0d,
DW_CFA_def_cfa_offset = 0x0e,
DW_CFA_def_cfa_expression = 0xf,
DW_CFA_expression = 0x10,
DW_CFA_offset_extended_sf = 0x11,
DW_CFA_GNU_args_size = 0x2e,
DW_OP_deref = 0x6,
};
```
|
========================================================================================================================================================
SOURCE CODE FILE: dwarf_symbolize_enums.h
LINES: 1
SIZE: 4.72 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\dwarf_symbolize_enums.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/profiler/unwind/unwind_error.h>
#include <cstdint>
#include <optional>
enum {
DW_TAG_subprogram = 0x2e,
DW_TAG_inlined_subroutine = 0x1d,
DW_TAG_compile_unit = 0x11,
DW_AT_sibling = 0x1, // reference
DW_AT_name = 0x3, // string
DW_AT_stmt_list = 0x10, // lineptr
DW_AT_addr_base = 0x73, // sec_offset
DW_AT_rnglists_base = 0x74, // sec_offset
DW_AT_low_pc = 0x11, // address
DW_AT_high_pc = 0x12, // address
DW_AT_specification = 0x47, // reference
DW_AT_abstract_origin = 0x31, // reference
DW_AT_linkage_name = 0x6e, // string
DW_AT_ranges = 0x55, // rnglist
DW_AT_str_offsets_base = 0x72, // sec_offset
DW_FORM_addr = 0x01,
DW_FORM_block2 = 0x03,
DW_FORM_block4 = 0x04,
DW_FORM_data2 = 0x05,
DW_FORM_data4 = 0x06,
DW_FORM_data8 = 0x07,
DW_FORM_string = 0x08,
DW_FORM_block = 0x09,
DW_FORM_block1 = 0x0a,
DW_FORM_data1 = 0x0b,
DW_FORM_flag = 0x0c,
DW_FORM_sdata = 0x0d,
DW_FORM_strp = 0x0e,
DW_FORM_udata = 0x0f,
DW_FORM_ref_addr = 0x10,
DW_FORM_ref1 = 0x11,
DW_FORM_ref2 = 0x12,
DW_FORM_ref4 = 0x13,
DW_FORM_ref8 = 0x14,
DW_FORM_ref_udata = 0x15,
DW_FORM_indirect = 0x16,
DW_FORM_sec_offset = 0x17,
DW_FORM_exprloc = 0x18,
DW_FORM_flag_present = 0x19,
DW_FORM_strx = 0x1a,
DW_FORM_addrx = 0x1b,
DW_FORM_ref_sup4 = 0x1c,
DW_FORM_strp_sup = 0x1d,
DW_FORM_data16 = 0x1e,
DW_FORM_line_strp = 0x1f,
DW_FORM_ref_sig8 = 0x20,
DW_FORM_implicit_const = 0x21,
DW_FORM_loclistx = 0x22,
DW_FORM_rnglistx = 0x23,
DW_FORM_ref_sup8 = 0x24,
DW_FORM_strx1 = 0x25,
DW_FORM_strx2 = 0x26,
DW_FORM_strx3 = 0x27,
DW_FORM_strx4 = 0x28,
DW_FORM_addrx1 = 0x29,
DW_FORM_addrx2 = 0x2a,
DW_FORM_addrx3 = 0x2b,
DW_FORM_addrx4 = 0x2c,
/* GNU Debug Fission extensions. */
DW_FORM_GNU_addr_index = 0x1f01,
DW_FORM_GNU_str_index = 0x1f02,
DW_FORM_GNU_ref_alt = 0x1f20, /* offset in alternate .debuginfo. */
DW_FORM_GNU_strp_alt = 0x1f21, /* offset in alternate .debug_str. */
DW_LNCT_path = 0x1,
DW_LNCT_directory_index = 0x2,
DW_LNS_extended_op = 0x00,
DW_LNE_end_sequence = 0x01,
DW_LNE_set_address = 0x02,
DW_LNS_copy = 0x01,
DW_LNS_advance_pc = 0x02,
DW_LNS_advance_line = 0x03,
DW_LNS_set_file = 0x04,
DW_LNS_const_add_pc = 0x08,
DW_LNS_fixed_advance_pc = 0x09,
DW_RLE_end_of_list = 0x0,
DW_RLE_base_addressx = 0x1,
DW_RLE_startx_endx = 0x2,
DW_RLE_startx_length = 0x3,
DW_RLE_offset_pair = 0x4,
DW_RLE_base_address = 0x5,
DW_RLE_start_end = 0x6,
DW_RLE_start_length = 0x7
};
static std::optional<size_t> formSize(uint64_t form, uint8_t sec_offset_size) {
switch (form) {
case DW_FORM_addr:
return sizeof(void*);
case DW_FORM_block2:
case DW_FORM_block4:
return std::nullopt;
case DW_FORM_data2:
return 2;
case DW_FORM_data4:
return 4;
case DW_FORM_data8:
return 8;
case DW_FORM_string:
case DW_FORM_block:
case DW_FORM_block1:
return std::nullopt;
case DW_FORM_data1:
case DW_FORM_flag:
return 1;
case DW_FORM_sdata:
return std::nullopt;
case DW_FORM_strp:
return sec_offset_size;
case DW_FORM_udata:
return std::nullopt;
case DW_FORM_ref_addr:
return sec_offset_size;
case DW_FORM_ref1:
return 1;
case DW_FORM_ref2:
return 2;
case DW_FORM_ref4:
return 4;
case DW_FORM_ref8:
return 8;
case DW_FORM_ref_udata:
case DW_FORM_indirect:
return std::nullopt;
case DW_FORM_sec_offset:
return sec_offset_size;
case DW_FORM_exprloc:
return std::nullopt;
case DW_FORM_flag_present:
return 0;
case DW_FORM_strx:
case DW_FORM_addrx:
return std::nullopt;
case DW_FORM_ref_sup4:
return 4;
case DW_FORM_strp_sup:
return sec_offset_size;
case DW_FORM_data16:
return 16;
case DW_FORM_line_strp:
return sec_offset_size;
case DW_FORM_ref_sig8:
return 8;
case DW_FORM_implicit_const:
return 0;
case DW_FORM_loclistx:
case DW_FORM_rnglistx:
return std::nullopt;
case DW_FORM_ref_sup8:
return 8;
case DW_FORM_strx1:
return 1;
case DW_FORM_strx2:
return 2;
case DW_FORM_strx3:
return 3;
case DW_FORM_strx4:
return 4;
case DW_FORM_addrx1:
return 1;
case DW_FORM_addrx2:
return 2;
case DW_FORM_addrx3:
return 3;
case DW_FORM_addrx4:
return 4;
case DW_FORM_GNU_addr_index:
case DW_FORM_GNU_str_index:
case DW_FORM_GNU_ref_alt:
case DW_FORM_GNU_strp_alt:
default:
return std::nullopt;
}
}
```
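A brief usage sketch of the `formSize` helper above (the caller is hypothetical and assumes the torch include directory is on the compiler's search path): fixed-size forms report how many bytes an attribute occupies, while variable-length forms such as ULEB128 data return `std::nullopt` and must be decoded rather than skipped.
```cpp
// Hypothetical caller of formSize(); not part of the PyTorch sources.
#include <torch/csrc/profiler/unwind/dwarf_symbolize_enums.h>

#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t sec_offset_size = 4; // 32-bit DWARF section offsets
  if (auto sz = formSize(DW_FORM_data8, sec_offset_size)) {
    std::printf("DW_FORM_data8 occupies %zu bytes\n", *sz); // prints 8
  }
  if (!formSize(DW_FORM_udata, sec_offset_size)) {
    std::printf("DW_FORM_udata is a ULEB128, so it has no fixed size\n");
  }
  return 0;
}
```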
|
===============================================================================================================================================
SOURCE CODE FILE: eh_frame_hdr.h
LINES: 1
SIZE: 2.71 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\eh_frame_hdr.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <ostream>
#include <torch/csrc/profiler/unwind/lexer.h>
#include <torch/csrc/profiler/unwind/unwind_error.h>
// Overview of the format described in
// https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
namespace torch::unwind {
struct EHFrameHdr {
EHFrameHdr(void* base) : base_(base) {
Lexer L(base, base);
version_ = L.read<uint8_t>();
eh_frame_ptr_enc_ = L.read<uint8_t>();
fde_count_enc_ = L.read<uint8_t>();
table_enc_ = L.read<uint8_t>();
if (table_enc_ == DW_EH_PE_omit) {
table_size_ = 0;
} else {
switch (table_enc_ & 0xF) {
case DW_EH_PE_udata2:
case DW_EH_PE_sdata2:
table_size_ = 2;
break;
case DW_EH_PE_udata4:
case DW_EH_PE_sdata4:
table_size_ = 4;
break;
case DW_EH_PE_udata8:
case DW_EH_PE_sdata8:
table_size_ = 8;
break;
case DW_EH_PE_uleb128:
case DW_EH_PE_sleb128:
throw UnwindError("uleb/sleb table encoding not supported");
break;
default:
throw UnwindError("unknown table encoding");
}
}
// NOLINTNEXTLINE(performance-no-int-to-ptr)
eh_frame_ = (void*)L.readEncodedOr(eh_frame_ptr_enc_, 0);
fde_count_ = L.readEncodedOr(fde_count_enc_, 0);
table_start_ = L.loc();
}
size_t nentries() const {
return fde_count_;
}
uint64_t lowpc(size_t i) const {
return Lexer(table_start_, base_)
.skip(2 * i * table_size_)
.readEncoded(table_enc_);
}
void* fde(size_t i) const {
// NOLINTNEXTLINE(performance-no-int-to-ptr)
return (void*)Lexer(table_start_, base_)
.skip((2 * i + 1) * table_size_)
.readEncoded(table_enc_);
}
void* entryForAddr(uint64_t addr) const {
if (!table_size_ || !nentries()) {
throw UnwindError("search table not present");
}
uint64_t low = 0;
uint64_t high = nentries();
while (low + 1 < high) {
auto mid = (low + high) / 2;
if (addr < lowpc(mid)) {
high = mid;
} else {
low = mid;
}
}
return fde(low);
}
friend std::ostream& operator<<(std::ostream& out, const EHFrameHdr& self) {
out << "EHFrameHeader(version=" << self.version_
<< ",table_size=" << self.table_size_
<< ",fde_count=" << self.fde_count_ << ")";
return out;
}
private:
void* base_;
void* table_start_;
uint8_t version_;
uint8_t eh_frame_ptr_enc_;
uint8_t fde_count_enc_;
uint8_t table_enc_;
void* eh_frame_ = nullptr;
int64_t fde_count_;
uint32_t table_size_;
};
} // namespace torch::unwind
```
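`entryForAddr` above performs a "last entry whose lowpc is <= addr" binary search over the sorted table. The following self-contained sketch (illustrative only, not taken from the header) shows the same invariant over a plain sorted vector.
```cpp
// Standalone illustration of the search in EHFrameHdr::entryForAddr():
// maintain low/high so that lowpc[low] <= addr < lowpc[high] and return the
// last index whose start address does not exceed addr.
#include <cstdint>
#include <cstdio>
#include <vector>

static size_t lastEntryAtOrBelow(
    const std::vector<uint64_t>& lowpc,
    uint64_t addr) {
  size_t low = 0;
  size_t high = lowpc.size();
  while (low + 1 < high) {
    size_t mid = (low + high) / 2;
    if (addr < lowpc[mid]) {
      high = mid;
    } else {
      low = mid;
    }
  }
  return low;
}

int main() {
  std::vector<uint64_t> starts = {0x1000, 0x2000, 0x3000};
  std::printf("%zu\n", lastEntryAtOrBelow(starts, 0x2fff)); // prints 1
  return 0;
}
```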
|
==================================================================================================================================================
SOURCE CODE FILE: fast_symbolizer.h
LINES: 3
SIZE: 3.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\fast_symbolizer.h
ENCODING: utf-8
```h
#pragma once
#include <fmt/format.h>
#include <sys/types.h>
#include <torch/csrc/profiler/unwind/debug_info.h>
#include <torch/csrc/profiler/unwind/line_number_program.h>
#include <torch/csrc/profiler/unwind/sections.h>
#include <torch/csrc/profiler/unwind/unwind.h>
#include <torch/csrc/profiler/unwind/unwind_error.h>
#include <memory>
#include <unordered_map>
namespace torch::unwind {
#define UNWIND_WARN(w, ...) \
do { \
w.emplace_back(fmt::format(__VA_ARGS__)); \
LOG_INFO("WARNING: {}\n", w.back()); \
} while (0)
struct FastSymbolizer {
FastSymbolizer() = default;
Frame symbolize(const std::string& library, uint64_t offset) {
LOG_INFO("symbolizing {} + 0x{:x}\n", library, offset);
Frame frame;
frame.funcname = "??";
frame.filename = library;
frame.lineno = offset;
auto s = getOrCreateSections(library);
if (auto e = s->findSubprogramName(offset)) {
frame.funcname = *e;
} else {
UNWIND_WARN(
warnings_,
"failed to find subprogram name for {} 0x{:x}",
library,
offset);
}
if (auto e = findLine(s, offset)) {
frame.filename = e->first;
frame.lineno = e->second;
} else {
UNWIND_WARN(
warnings_, "failed to find file/line for {} 0x{:x}", library, offset);
}
return frame;
}
const std::vector<std::string>& warnings() {
return warnings_;
}
private:
void parseDebugInfo(Sections* s) {
uint64_t offset = 0;
while (offset < s->debug_info.size) {
DebugInfo info(*s);
info.parse(offset);
if (auto lnp_offset = info.lineNumberProgramOffset()) {
for (auto r : info.ranges()) {
s->addDebugInfoRange(r.first, r.second, line_number_programs_.size());
}
line_number_programs_.emplace_back(
std::make_unique<LineNumberProgram>(*s, *lnp_offset));
}
offset = info.nextOffset();
}
}
Sections* getOrCreateSections(const std::string& library) {
auto it = libraries_.find(library);
if (it == libraries_.end()) {
it = libraries_.insert({library, std::make_unique<Sections>()}).first;
try {
Sections* s = it->second.get();
s->parse(library.c_str());
parseDebugInfo(s);
} catch (UnwindError& err) {
UNWIND_WARN(
warnings_, "failed to parse library {}: {}", library, err.what());
}
}
return it->second.get();
}
std::optional<std::pair<std::string, int64_t>> findLine(
Sections* s,
uint64_t offset) {
if (auto idx = s->findDebugInfoOffset(offset)) {
auto r = line_number_programs_.at(*idx).get();
try {
r->parse();
} catch (UnwindError& err) {
UNWIND_WARN(
warnings_,
"failed to read line number program [{:x}] {}",
r->offset(),
err.what());
}
if (auto e = r->find(offset)) {
return std::make_pair(r->filename(e->file), e->line);
}
}
return std::nullopt;
}
std::unordered_map<std::string, std::unique_ptr<Sections>> libraries_;
std::vector<std::unique_ptr<LineNumberProgram>> line_number_programs_;
std::vector<std::string> warnings_;
};
} // namespace torch::unwind
```
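A hedged usage sketch of `FastSymbolizer` follows; the library path and offset are placeholders, and the program assumes it is built against the torch include tree with fmt available. Missing debug information is reported through `warnings()` rather than by throwing.
```cpp
// Hypothetical driver for FastSymbolizer; path and offset are placeholders.
#include <torch/csrc/profiler/unwind/fast_symbolizer.h>

#include <cstdio>

int main() {
  torch::unwind::FastSymbolizer symbolizer;
  torch::unwind::Frame frame =
      symbolizer.symbolize("/usr/lib/x86_64-linux-gnu/libc.so.6", 0x1234);
  std::printf(
      "%s at %s:%llu\n",
      frame.funcname.c_str(),
      frame.filename.c_str(),
      (unsigned long long)frame.lineno);
  for (const auto& w : symbolizer.warnings()) {
    std::printf("warning: %s\n", w.c_str());
  }
  return 0;
}
```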
|
======================================================================================================================================
SOURCE CODE FILE: fde.h
LINES: 16
SIZE: 12.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\fde.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/Exception.h>
#include <c10/util/irange.h>
#include <torch/csrc/profiler/unwind/action.h>
#include <torch/csrc/profiler/unwind/lexer.h>
#include <array>
#include <iostream>
#include <sstream>
#include <vector>
namespace torch::unwind {
struct TableState {
Action cfa;
std::array<Action, D_REG_SIZE> registers;
friend std::ostream& operator<<(std::ostream& out, const TableState& self) {
out << "cfa = " << self.cfa << "; ";
for (auto r : c10::irange(self.registers.size())) {
if (self.registers.at(r).kind != A_UNDEFINED) {
out << "r" << r << " = " << self.registers.at(r) << "; ";
}
}
return out;
}
};
// FDE - Frame Description Entry (Concept in ELF spec)
// This format is explained well by
// https://www.airs.com/blog/archives/460
// Details of different dwarf actions are explained
// in the spec document:
// https://web.archive.org/web/20221129184704/https://dwarfstd.org/doc/DWARF4.doc
// An overview of how DWARF unwinding works is given in
// https://dl.acm.org/doi/pdf/10.1145/3360572
// A similar implementation written in rust is:
// https://github.com/mstange/framehop/
template <bool LOG = false>
struct FDE {
FDE(void* data, const char* library_name, uint64_t load_bias)
: library_name_(library_name), load_bias_(load_bias) {
Lexer L(data);
auto length = L.read4or8Length();
void* fde_start = L.loc();
// NOLINTNEXTLINE(performance-no-int-to-ptr)
void* cie_data = (void*)((int64_t)fde_start - L.read<uint32_t>());
Lexer LC(cie_data);
auto cie_length = LC.read4or8Length();
void* cie_start = LC.loc();
auto zero = LC.read<uint32_t>();
TORCH_INTERNAL_ASSERT(zero == 0, "expected 0 for CIE");
auto version = LC.read<uint8_t>();
TORCH_INTERNAL_ASSERT(
version == 1 || version == 3, "unexpected CIE version (expected 1 or 3)");
augmentation_string_ = LC.readCString();
if (hasAugmentation("eh")) {
throw UnwindError("unsupported 'eh' augmentation string");
}
code_alignment_factor_ = static_cast<int64_t>(LC.readULEB128());
data_alignment_factor_ = static_cast<int64_t>(LC.readSLEB128());
if (version == 1) {
ra_register_ = LC.read<uint8_t>();
} else {
ra_register_ = static_cast<int64_t>(LC.readULEB128());
}
// we assume this in the state
TORCH_INTERNAL_ASSERT(ra_register_ == 16, "unexpected return address register");
if (augmentation_string_ && *augmentation_string_ == 'z') {
augmentation_length_ = static_cast<int64_t>(LC.readULEB128());
Lexer A(LC.loc());
for (auto ap = augmentation_string_ + 1; *ap; ap++) {
switch (*ap) {
case 'L':
lsda_enc = A.read<uint8_t>();
break;
case 'R':
fde_enc = A.read<uint8_t>();
break;
case 'P': {
uint8_t personality_enc = A.read<uint8_t>();
A.readEncoded(personality_enc);
} break;
case 'S': {
// signal handler
} break;
default: {
throw UnwindError("unknown augmentation string");
} break;
}
}
}
LC.skip(augmentation_length_);
low_pc_ = L.readEncoded(fde_enc);
high_pc_ = low_pc_ + L.readEncodedValue(fde_enc);
if (hasAugmentation("z")) {
augmentation_length_fde_ = static_cast<int64_t>(L.readULEB128());
}
L.readEncodedOr(lsda_enc, 0);
cie_begin_ = LC.loc();
fde_begin_ = L.loc();
cie_end_ = (void*)((const char*)cie_start + cie_length);
fde_end_ = (void*)((const char*)fde_start + length);
}
// OP Code implementations
void advance_raw(int64_t amount) {
auto previous_pc = current_pc_;
current_pc_ += amount;
if (LOG) {
(*out_) << (void*)(previous_pc - load_bias_) << "-"
<< (void*)(current_pc_ - load_bias_) << ": " << state() << "\n";
}
}
void advance_loc(int64_t amount) {
if (LOG) {
(*out_) << "advance_loc " << amount << "\n";
}
advance_raw(amount * code_alignment_factor_);
}
void offset(int64_t reg, int64_t offset) {
if (LOG) {
(*out_) << "offset " << reg << " " << offset << "\n";
}
if (reg >= (int64_t)state().registers.size()) {
if (LOG) {
(*out_) << "OFFSET OF BIG REGISTER " << reg << " ignored...\n";
}
return;
}
state().registers.at(reg) =
Action{A_LOAD_CFA_OFFSET, -1, offset * data_alignment_factor_};
}
void restore(int64_t reg) {
if (LOG) {
(*out_) << "restore " << reg << "\n";
}
if (reg >= (int64_t)state().registers.size()) {
if (LOG) {
(*out_) << "RESTORE OF BIG REGISTER " << reg << " ignored...\n";
}
return;
}
state().registers.at(reg) = initial_state_.registers.at(reg);
}
void def_cfa(int64_t reg, int64_t off) {
if (LOG) {
(*out_) << "def_cfa " << reg << " " << off << "\n";
}
last_reg_ = reg;
last_offset_ = off;
state().cfa = Action::regPlusData(static_cast<int32_t>(reg), off);
}
void def_cfa_register(int64_t reg) {
def_cfa(reg, last_offset_);
}
void def_cfa_offset(int64_t off) {
def_cfa(last_reg_, off);
}
void remember_state() {
if (LOG) {
(*out_) << "remember_state\n";
}
state_stack_.push_back(state());
}
void restore_state() {
if (LOG) {
(*out_) << "restore_state\n";
}
state_stack_.pop_back();
}
void undefined(int64_t reg) {
if (LOG) {
(*out_) << "undefined " << reg << "\n";
}
state().registers.at(reg) = Action::undefined();
}
void register_(int64_t reg, int64_t rhs_reg) {
if (LOG) {
(*out_) << "register " << reg << " " << rhs_reg << "\n";
}
state().registers.at(reg) =
Action::regPlusData(static_cast<int32_t>(reg), 0);
}
TableState& state() {
return state_stack_.back();
}
void dump(std::ostream& out) {
out_ = &out;
out << "FDE(augmentation_string=" << augmentation_string_
<< ", low_pc=" << (void*)(low_pc_ - load_bias_)
<< ",high_pc=" << (void*)(high_pc_ - load_bias_)
<< ",code_alignment_factor=" << code_alignment_factor_
<< ", data_alignment_factor=" << data_alignment_factor_
<< ", ra_register_=" << ra_register_ << ")\n";
readUpTo(high_pc_);
out_ = &std::cout;
}
TableState readUpTo(uint64_t addr) {
if (addr < low_pc_ || addr > high_pc_) {
throw UnwindError("Address not in range");
}
if (LOG) {
// NOLINTNEXTLINE(performance-no-int-to-ptr)
(*out_) << "readUpTo " << (void*)addr << " for " << library_name_
<< " at " << (void*)load_bias_ << "\n";
}
state_stack_.emplace_back();
current_pc_ = low_pc_;
// parse instructions...
Lexer LC(cie_begin_);
while (LC.loc() < cie_end_ && current_pc_ <= addr) {
readInstruction(LC);
}
if (current_pc_ > addr) {
return state();
}
initial_state_ = state_stack_.back();
if (LOG) {
(*out_) << "--\n";
}
Lexer L(fde_begin_);
while (L.loc() < fde_end_ && current_pc_ <= addr) {
readInstruction(L);
}
// so that we print the full range in debugging
if (current_pc_ <= addr) {
advance_raw(addr - current_pc_);
}
return state();
}
void dumpAddr2Line() {
std::cout << "addr2line -f -e " << library_name_ << " "
<< (void*)(low_pc_ - load_bias_) << "\n";
}
void readInstruction(Lexer& L) {
uint8_t bc = L.read<uint8_t>();
auto op = bc >> 6;
auto lowbits = bc & 0x3F;
switch (op) {
case 0x0: {
switch (lowbits) {
case DW_CFA_nop: {
return; // nop
}
case DW_CFA_advance_loc1: {
auto delta = L.read<uint8_t>();
return advance_loc(delta);
}
case DW_CFA_advance_loc2: {
auto delta = L.read<uint16_t>();
return advance_loc(delta);
}
case DW_CFA_advance_loc4: {
auto delta = L.read<uint32_t>();
return advance_loc(delta);
}
case DW_CFA_restore_extended: {
auto reg = L.readULEB128();
return restore(reg);
}
case DW_CFA_undefined: {
auto reg = L.readULEB128();
return undefined(reg);
}
case DW_CFA_register: {
auto reg = L.readULEB128();
auto rhs_reg = L.readULEB128();
return register_(reg, rhs_reg);
}
case DW_CFA_def_cfa: {
auto reg = L.readULEB128();
auto off = L.readULEB128();
return def_cfa(reg, off);
}
case DW_CFA_def_cfa_register: {
auto reg = L.readULEB128();
return def_cfa_register(reg);
}
case DW_CFA_def_cfa_offset: {
auto off = L.readULEB128();
return def_cfa_offset(off);
}
case DW_CFA_offset_extended_sf: {
auto reg = L.readULEB128();
auto off = L.readSLEB128();
return offset(reg, off);
}
case DW_CFA_remember_state: {
return remember_state();
}
case DW_CFA_restore_state: {
return restore_state();
}
case DW_CFA_GNU_args_size: {
// GNU_args_size, we do not need to know it..
L.readULEB128();
return;
}
case DW_CFA_expression: {
auto reg = L.readULEB128();
auto len = L.readULEB128();
// NOLINTNEXTLINE(performance-no-int-to-ptr)
auto end = (void*)((uint64_t)L.loc() + len);
auto op = L.read<uint8_t>();
if ((op & 0xF0) == 0x70) { // DW_bregX
auto rhs_reg = (op & 0xF);
auto addend = L.readSLEB128();
if (L.loc() == end) {
state().registers.at(reg) =
Action::regPlusDataDeref(rhs_reg, addend);
return;
}
}
throw UnwindError("Unsupported dwarf expression");
}
case DW_CFA_def_cfa_expression: {
auto len = L.readULEB128();
// NOLINTNEXTLINE(performance-no-int-to-ptr)
auto end = (void*)((uint64_t)L.loc() + len);
auto op = L.read<uint8_t>();
if ((op & 0xF0) == 0x70) { // DW_bregX
auto rhs_reg = (op & 0xF);
auto addend = L.readSLEB128();
if (L.loc() != end) {
auto op2 = L.read<uint8_t>();
if (op2 == DW_OP_deref && L.loc() == end) { // deref
state().cfa = Action::regPlusDataDeref(rhs_reg, addend);
return;
}
}
}
throw UnwindError("Unsupported def_cfa dwarf expression");
}
default: {
std::stringstream ss;
// NOLINTNEXTLINE(performance-no-int-to-ptr)
ss << "unknown op code " << (void*)(uint64_t)lowbits;
throw UnwindError(ss.str());
}
}
}
case DW_CFA_advance_loc: {
return advance_loc(lowbits);
}
case DW_CFA_offset: {
auto off = L.readULEB128();
return offset(lowbits, off);
}
case DW_CFA_restore: {
return restore(lowbits);
}
}
}
// used for debug printing
const char* library_name_;
uint64_t load_bias_;
// parsed from the eh_string data structures:
const char* augmentation_string_ = nullptr;
int64_t augmentation_length_ = 0;
int64_t augmentation_length_fde_ = 0;
int64_t code_alignment_factor_;
int64_t data_alignment_factor_;
void* cie_data_{nullptr};
int64_t ra_register_;
uint8_t lsda_enc = DW_EH_PE_omit;
uint8_t fde_enc = DW_EH_PE_absptr;
uint64_t low_pc_ = UINT64_MAX;
uint64_t high_pc_ = UINT64_MAX;
void* cie_begin_;
void* fde_begin_;
void* cie_end_;
void* fde_end_;
// state accumulated while parsing instructions
int64_t last_reg_ = 0;
int64_t last_offset_ = 0;
uint64_t current_pc_ = 0;
TableState
initial_state_; // state after the initial instructions, used by restore
std::vector<TableState> state_stack_;
std::ostream* out_ = &std::cout; // for debug dumping
private:
bool hasAugmentation(const char* s) {
return strstr(augmentation_string_, s) != nullptr;
}
};
} // namespace torch::unwind
```
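`readInstruction` above depends on how DWARF packs call-frame instructions: the top two bits of each byte select a primary opcode and the low six bits carry an inline operand, except that a primary opcode of zero means the low bits name an extended opcode. A tiny standalone illustration of that split:
```cpp
// Standalone sketch (not from the header) of the 2-bit/6-bit split used by
// FDE::readInstruction.
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t bc = 0x45; // DW_CFA_advance_loc with an inline delta of 5
  uint8_t op = bc >> 6; // high 2 bits: primary opcode (1 == advance_loc)
  uint8_t lowbits = bc & 0x3F; // low 6 bits: inline operand (5)
  std::printf("op=%u operand=%u\n", op, lowbits);
  return 0;
}
```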
|
========================================================================================================================================
SOURCE CODE FILE: lexer.h
LINES: 1
SIZE: 3.95 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\lexer.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <cstring>
#include <utility>
#include <torch/csrc/profiler/unwind/dwarf_enums.h>
#include <torch/csrc/profiler/unwind/unwind_error.h>
namespace torch::unwind {
template <bool checked>
struct LexerImpl {
LexerImpl(void* data, void* base = nullptr, void* end = nullptr)
: next_((const char*)data),
base_((int64_t)base),
end_((const char*)end) {}
template <typename T>
T read() {
T result;
auto end = next_ + sizeof(T);
UNWIND_CHECK(
!checked || end <= end_,
"read out of bounds {} >= {}",
(void*)end,
(void*)end_);
memcpy(&result, next_, sizeof(T));
next_ = end;
return result;
}
// SLEB/ULEB code adapted from LLVM equivalents
int64_t readSLEB128() {
int64_t Value = 0;
unsigned Shift = 0;
uint8_t Byte = 0;
do {
Byte = read<uint8_t>();
uint64_t Slice = Byte & 0x7f;
if ((Shift >= 64 && Slice != (Value < 0 ? 0x7f : 0x00)) ||
(Shift == 63 && Slice != 0 && Slice != 0x7f)) {
throw UnwindError("sleb128 too big for int64");
}
Value |= int64_t(Slice << Shift);
Shift += 7;
} while (Byte >= 128);
// Sign extend negative numbers if needed.
if (Shift < 64 && (Byte & 0x40)) {
Value |= int64_t((-1ULL) << Shift);
}
return Value;
}
uint64_t readULEB128() {
uint64_t Value = 0;
unsigned Shift = 0;
uint8_t p = 0;
do {
p = read<uint8_t>();
uint64_t Slice = p & 0x7f;
if ((Shift >= 64 && Slice != 0) || Slice << Shift >> Shift != Slice) {
throw UnwindError("uleb128 too big for uint64");
}
Value += Slice << Shift;
Shift += 7;
} while (p >= 128);
return Value;
}
const char* readCString() {
auto result = next_;
if (!checked) {
next_ += strlen(next_) + 1;
return result;
}
while (next_ < end_) {
if (*next_++ == '\0') {
return result;
}
}
UNWIND_CHECK(
false, "string is out of bounds {} >= {}", (void*)next_, (void*)end_);
}
int64_t readEncoded(uint8_t enc) {
int64_t r = 0;
switch (enc & (~DW_EH_PE_indirect & 0xF0)) {
case DW_EH_PE_absptr:
break;
case DW_EH_PE_pcrel:
r = (int64_t)next_;
break;
case DW_EH_PE_datarel:
r = base_;
break;
default:
throw UnwindError("unknown encoding");
}
return r + readEncodedValue(enc);
}
int64_t readEncodedOr(uint8_t enc, int64_t orelse) {
if (enc == DW_EH_PE_omit) {
return orelse;
}
return readEncoded(enc);
}
int64_t read4or8Length() {
return readSectionLength().first;
}
std::pair<int64_t, bool> readSectionLength() {
int64_t length = read<uint32_t>();
if (length == 0xFFFFFFFF) {
return std::make_pair(read<int64_t>(), true);
}
return std::make_pair(length, false);
}
void* loc() const {
return (void*)next_;
}
LexerImpl& skip(size_t bytes) {
next_ += bytes;
return *this;
}
int64_t readEncodedValue(uint8_t enc) {
switch (enc & 0xF) {
case DW_EH_PE_udata2:
return read<uint16_t>();
case DW_EH_PE_sdata2:
return read<int16_t>();
case DW_EH_PE_udata4:
return read<uint32_t>();
case DW_EH_PE_sdata4:
return read<int32_t>();
case DW_EH_PE_udata8:
return read<uint64_t>();
case DW_EH_PE_sdata8:
return read<int64_t>();
case DW_EH_PE_uleb128:
return readULEB128();
case DW_EH_PE_sleb128:
return readSLEB128();
default:
throw UnwindError("not implemented");
}
}
private:
const char* next_;
int64_t base_;
const char* end_;
};
// using Lexer = LexerImpl<false>;
using CheckedLexer = LexerImpl<true>;
using Lexer = LexerImpl<false>;
} // namespace torch::unwind
```
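A small usage sketch of the unchecked `Lexer` (assumes the torch include path; the byte sequence is the standard DWARF example encoding of 624485 as a ULEB128):
```cpp
// Hypothetical caller of Lexer::readULEB128(); not part of the sources.
#include <torch/csrc/profiler/unwind/lexer.h>

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t bytes[] = {0xE5, 0x8E, 0x26}; // ULEB128 encoding of 624485
  torch::unwind::Lexer L((void*)bytes);
  std::printf("%llu\n", (unsigned long long)L.readULEB128()); // 624485
  return 0;
}
```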
|
======================================================================================================================================================
SOURCE CODE FILE: line_number_program.h
LINES: 18
SIZE: 10.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\line_number_program.h
ENCODING: utf-8
```h
#include <c10/util/irange.h>
#include <torch/csrc/profiler/unwind/debug_info.h>
#include <torch/csrc/profiler/unwind/dwarf_enums.h>
#include <torch/csrc/profiler/unwind/dwarf_symbolize_enums.h>
#include <torch/csrc/profiler/unwind/lexer.h>
#include <torch/csrc/profiler/unwind/sections.h>
#include <torch/csrc/profiler/unwind/unwind_error.h>
#include <tuple>
namespace torch::unwind {
struct LineNumberProgram {
LineNumberProgram(Sections& s, uint64_t offset) : s_(s), offset_(offset) {}
uint64_t offset() {
return offset_;
}
void parse() {
if (parsed_) {
return;
}
parsed_ = true;
CheckedLexer L = s_.debug_line.lexer(offset_);
std::tie(length_, is_64bit_) = L.readSectionLength();
program_end_ = (char*)L.loc() + length_;
auto version = L.read<uint16_t>();
UNWIND_CHECK(
version == 5 || version == 4,
"expected version 4 or 5 but found {}",
version);
if (version == 5) {
auto address_size = L.read<uint8_t>();
UNWIND_CHECK(
address_size == 8,
"expected 64-bit dwarf but found address size {}",
address_size);
segment_selector_size_ = L.read<uint8_t>();
}
header_length_ = is_64bit_ ? L.read<uint64_t>() : L.read<uint32_t>();
program_ = L;
program_.skip(int64_t(header_length_));
minimum_instruction_length_ = L.read<uint8_t>();
maximum_operations_per_instruction_ = L.read<uint8_t>();
default_is_stmt_ = L.read<uint8_t>();
line_base_ = L.read<int8_t>();
line_range_ = L.read<uint8_t>();
opcode_base_ = L.read<uint8_t>();
UNWIND_CHECK(line_range_ != 0, "line_range_ must be non-zero");
standard_opcode_lengths_.resize(opcode_base_);
for (size_t i = 1; i < opcode_base_; i++) {
standard_opcode_lengths_[i] = L.read<uint8_t>();
}
// fmt::print("{:x} {:x} {} {} {} {} {}\n", offset_, header_length_,
// minimum_instruction_length_, maximum_operations_per_instruction_,
// line_base_, line_range_, opcode_base_);
uint8_t directory_entry_format_count = L.read<uint8_t>();
if (version == 5) {
struct Member {
uint64_t content_type;
uint64_t form;
};
std::vector<Member> directory_members;
directory_members.reserve(directory_entry_format_count);
for (size_t i = 0; i < directory_entry_format_count; i++) {
directory_members.push_back({L.readULEB128(), L.readULEB128()});
}
uint64_t directories_count = L.readULEB128();
for (size_t i = 0; i < directories_count; i++) {
for (auto& member : directory_members) {
switch (member.content_type) {
case DW_LNCT_path: {
include_directories_.emplace_back(
s_.readString(L, member.form, is_64bit_));
} break;
default: {
skipForm(L, member.form);
} break;
}
}
}
for (auto i : c10::irange(directories_count)) {
(void)i;
LOG_INFO("{} {}\n", i, include_directories_[i]);
}
auto file_name_entry_format_count = L.read<uint8_t>();
std::vector<Member> file_members;
file_members.reserve(file_name_entry_format_count);
for (size_t i = 0; i < file_name_entry_format_count; i++) {
file_members.push_back({L.readULEB128(), L.readULEB128()});
}
auto files_count = L.readULEB128();
for (size_t i = 0; i < files_count; i++) {
for (auto& member : file_members) {
switch (member.content_type) {
case DW_LNCT_path: {
file_names_.emplace_back(
s_.readString(L, member.form, is_64bit_));
} break;
case DW_LNCT_directory_index: {
file_directory_index_.emplace_back(readData(L, member.form));
UNWIND_CHECK(
file_directory_index_.back() < include_directories_.size(),
"directory index out of range");
} break;
default: {
skipForm(L, member.form);
} break;
}
}
}
for (auto i : c10::irange(files_count)) {
(void)i;
LOG_INFO("{} {} {}\n", i, file_names_[i], file_directory_index_[i]);
}
} else {
include_directories_.emplace_back(""); // implicit cwd
while (true) {
auto str = L.readCString();
if (*str == '\0') {
break;
}
include_directories_.emplace_back(str);
}
file_names_.emplace_back("");
file_directory_index_.emplace_back(0);
while (true) {
auto str = L.readCString();
if (*str == '\0') {
break;
}
auto directory_index = L.readULEB128();
L.readULEB128(); // mod_time
L.readULEB128(); // file_length
file_names_.emplace_back(str);
file_directory_index_.push_back(directory_index);
}
}
UNWIND_CHECK(
maximum_operations_per_instruction_ == 1,
"maximum_operations_per_instruction_ must be 1");
UNWIND_CHECK(
minimum_instruction_length_ == 1,
"minimum_instruction_length_ must be 1");
readProgram();
}
struct Entry {
uint32_t file = 1;
int64_t line = 1;
};
std::optional<Entry> find(uint64_t address) {
auto e = program_index_.find(address);
if (!e) {
return std::nullopt;
}
return all_programs_.at(*e).find(address);
}
std::string filename(uint64_t index) {
return fmt::format(
"{}/{}",
include_directories_.at(file_directory_index_.at(index)),
file_names_.at(index));
}
private:
void skipForm(CheckedLexer& L, uint64_t form) {
auto sz = formSize(form, is_64bit_ ? 8 : 4);
UNWIND_CHECK(sz, "unsupported form {}", form);
L.skip(int64_t(*sz));
}
uint64_t readData(CheckedLexer& L, uint64_t encoding) {
switch (encoding) {
case DW_FORM_data1:
return L.read<uint8_t>();
case DW_FORM_data2:
return L.read<uint16_t>();
case DW_FORM_data4:
return L.read<uint32_t>();
case DW_FORM_data8:
return L.read<uint64_t>();
case DW_FORM_udata:
return L.readULEB128();
default:
UNWIND_CHECK(false, "unsupported data encoding {}", encoding);
}
}
void produceEntry() {
if (shadow_) {
return;
}
if (ranges_.size() == 1) {
start_address_ = address_;
}
PRINT_LINE_TABLE(
"{:x}\t{}\t{}\n", address_, filename(entry_.file), entry_.line);
UNWIND_CHECK(
entry_.file < file_names_.size(),
"file index {} > {} entries",
entry_.file,
file_names_.size());
ranges_.add(address_, entry_, true);
}
void endSequence() {
if (shadow_) {
return;
}
PRINT_LINE_TABLE(
"{:x}\tEND\n", address_, filename(entry_.file), entry_.line);
program_index_.add(start_address_, all_programs_.size(), false);
program_index_.add(address_, std::nullopt, false);
all_programs_.emplace_back(std::move(ranges_));
ranges_ = RangeTable<Entry>();
}
void readProgram() {
while (program_.loc() < program_end_) {
PRINT_INST("{:x}: ", (char*)program_.loc() - (s_.debug_line.data));
uint8_t op = program_.read<uint8_t>();
if (op >= opcode_base_) {
auto op2 = int64_t(op - opcode_base_);
address_ += op2 / line_range_;
entry_.line += line_base_ + (op2 % line_range_);
PRINT_INST(
"address += {}, line += {}\n",
op2 / line_range_,
line_base_ + (op2 % line_range_));
produceEntry();
} else {
switch (op) {
case DW_LNS_extended_op: {
auto len = program_.readULEB128();
auto extended_op = program_.read<uint8_t>();
switch (extended_op) {
case DW_LNE_end_sequence: {
PRINT_INST("end_sequence\n");
endSequence();
entry_ = Entry{};
} break;
case DW_LNE_set_address: {
address_ = program_.read<uint64_t>();
if (!shadow_) {
PRINT_INST(
"set address {:x} {:x} {:x}\n",
address_,
min_address_,
max_address_);
}
shadow_ = address_ == 0;
} break;
default: {
PRINT_INST("skip extended op {}\n", extended_op);
program_.skip(int64_t(len - 1));
} break;
}
} break;
case DW_LNS_copy: {
PRINT_INST("copy\n");
produceEntry();
} break;
case DW_LNS_advance_pc: {
PRINT_INST("advance pc\n");
address_ += program_.readULEB128();
} break;
case DW_LNS_advance_line: {
entry_.line += program_.readSLEB128();
PRINT_INST("advance line {}\n", entry_.line);
} break;
case DW_LNS_set_file: {
PRINT_INST("set file\n");
entry_.file = program_.readULEB128();
} break;
case DW_LNS_const_add_pc: {
PRINT_INST("const add pc\n");
address_ += (255 - opcode_base_) / line_range_;
} break;
case DW_LNS_fixed_advance_pc: {
PRINT_INST("fixed advance pc\n");
address_ += program_.read<uint16_t>();
} break;
default: {
PRINT_INST("other {}\n", op);
auto n = standard_opcode_lengths_[op];
for (int i = 0; i < n; ++i) {
program_.readULEB128();
}
} break;
}
}
}
PRINT_INST(
"{:x}: end {:x}\n",
((char*)program_.loc() - s_.debug_line.data),
program_end_ - s_.debug_line.data);
}
uint64_t address_ = 0;
bool shadow_ = false;
bool parsed_ = false;
Entry entry_ = {};
std::vector<std::string> include_directories_;
std::vector<std::string> file_names_;
std::vector<uint64_t> file_directory_index_;
uint8_t segment_selector_size_ = 0;
uint8_t minimum_instruction_length_ = 0;
uint8_t maximum_operations_per_instruction_ = 0;
int8_t line_base_ = 0;
uint8_t line_range_ = 0;
uint8_t opcode_base_ = 0;
bool default_is_stmt_ = false;
CheckedLexer program_ = {nullptr};
char* program_end_ = nullptr;
uint64_t header_length_ = 0;
uint64_t length_ = 0;
bool is_64bit_ = false;
std::vector<uint8_t> standard_opcode_lengths_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
Sections& s_;
uint64_t offset_;
uint64_t start_address_ = 0;
RangeTable<uint64_t> program_index_;
std::vector<RangeTable<Entry>> all_programs_;
RangeTable<Entry> ranges_;
};
} // namespace torch::unwind
```
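The `op >= opcode_base_` branch above handles DWARF "special opcodes", which advance the address and line registers with a single byte. The arithmetic is sketched below using the commonly emitted header constants `line_base = -5`, `line_range = 14`, `opcode_base = 13`; these constants are an assumption for the example, not values read from the header above.
```cpp
// Standalone sketch of special-opcode decoding, mirroring readProgram()
// (minimum_instruction_length is taken to be 1, as the parser asserts).
#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t opcode_base = 13;
  const int8_t line_base = -5;
  const uint8_t line_range = 14;
  uint64_t address = 0x1000;
  int64_t line = 10;
  uint8_t op = 0x4b; // a special opcode (>= opcode_base)
  int64_t adj = op - opcode_base; // 62
  address += adj / line_range; // advances the address by 4
  line += line_base + (adj % line_range); // advances the line by 1
  std::printf(
      "address=0x%llx line=%lld\n",
      (unsigned long long)address,
      (long long)line);
  return 0;
}
```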
|
===========================================================================================================================================
SOURCE CODE FILE: mem_file.h
LINES: 1
SIZE: 4.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\mem_file.h
ENCODING: utf-8
```h
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <c10/util/error.h>
#include <elf.h>
#include <fcntl.h>
#include <fmt/format.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <torch/csrc/profiler/unwind/lexer.h>
#include <torch/csrc/profiler/unwind/unwind_error.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>
#include <cstring>
namespace torch::unwind {
struct Section {
char* data = nullptr;
size_t size = 0;
const char* string(size_t offset) {
return lexer(offset).readCString();
}
CheckedLexer lexer(size_t offset) {
return CheckedLexer(data + offset, data, data + size);
}
};
/// Memory maps a file into the address space read-only, and manages the
/// lifetime of the mapping. Here are a few use cases:
/// 1. Used in the loader to read in the initial image, and to inspect
///    ELF files for dependencies before calling dlopen.
///
/// 2. Used in unity to load the elf file.
struct MemFile {
explicit MemFile(const char* filename_)
: fd_(open(filename_, O_RDONLY)), name_(filename_) {
UNWIND_CHECK(
fd_ != -1,
"failed to open {}: {}",
filename_,
c10::utils::str_error(errno));
struct stat s {};
if (-1 == fstat(fd_, &s)) {
close(fd_); // destructors don't run during exceptions
UNWIND_CHECK(
false,
"failed to stat {}: {}",
filename_,
c10::utils::str_error(errno));
}
n_bytes_ = s.st_size;
UNWIND_CHECK(
n_bytes_ > sizeof(Elf64_Ehdr), "shared library too small: {}", filename_);
mem_ = (char*)mmap(nullptr, n_bytes_, PROT_READ, MAP_SHARED, fd_, 0);
if (MAP_FAILED == mem_) {
close(fd_);
UNWIND_CHECK(
false,
"failed to mmap {}: {}",
filename_,
c10::utils::str_error(errno));
}
ehdr_ = (Elf64_Ehdr*)mem_;
#define ELF_CHECK(cond) UNWIND_CHECK(cond, "not an ELF file: {}", filename_)
ELF_CHECK(ehdr_->e_ident[EI_MAG0] == ELFMAG0);
ELF_CHECK(ehdr_->e_ident[EI_MAG1] == ELFMAG1);
ELF_CHECK(ehdr_->e_ident[EI_MAG2] == ELFMAG2);
ELF_CHECK(ehdr_->e_ident[EI_MAG3] == ELFMAG3);
ELF_CHECK(ehdr_->e_ident[EI_CLASS] == ELFCLASS64);
ELF_CHECK(ehdr_->e_ident[EI_VERSION] == EV_CURRENT);
ELF_CHECK(ehdr_->e_version == EV_CURRENT);
ELF_CHECK(ehdr_->e_machine == EM_X86_64);
#undef ELF_CHECK
UNWIND_CHECK(
ehdr_->e_shoff + sizeof(Elf64_Shdr) * ehdr_->e_shnum <= n_bytes_,
"invalid section header table {} {} {}",
ehdr_->e_shoff + sizeof(Elf64_Shdr) * ehdr_->e_shnum,
n_bytes_,
ehdr_->e_shnum);
shdr_ = (Elf64_Shdr*)(mem_ + ehdr_->e_shoff);
UNWIND_CHECK(
ehdr_->e_shstrndx < ehdr_->e_shnum, "invalid strtab section offset");
auto& strtab_hdr = shdr_[ehdr_->e_shstrndx];
strtab_ = getSection(strtab_hdr);
}
MemFile(const MemFile&) = delete;
MemFile(MemFile&&) = delete;
MemFile& operator=(const MemFile&) = delete;
MemFile& operator=(MemFile&&) = delete;
[[nodiscard]] const char* data() const {
return (const char*)mem_;
}
/// Returns whether or not the file descriptor
/// of the underlying file is valid.
int valid() {
return fcntl(fd_, F_GETFD) != -1 || errno != EBADF;
}
~MemFile() {
if (mem_) {
munmap((void*)mem_, n_bytes_);
}
if (fd_ >= 0) {
close(fd_);
}
}
/// Returns the size of the underlying file defined by the `MemFile`
size_t size() {
return n_bytes_;
}
[[nodiscard]] int fd() const {
return fd_;
}
Section getSection(const Elf64_Shdr& shdr) {
UNWIND_CHECK(shdr.sh_offset + shdr.sh_size <= n_bytes_, "invalid section");
return Section{mem_ + shdr.sh_offset, shdr.sh_size};
}
Section getSection(const char* name, bool optional) {
for (int i = 0; i < ehdr_->e_shnum; i++) {
if (strcmp(strtab_.string(shdr_[i].sh_name), name) == 0) {
return getSection(shdr_[i]);
}
}
UNWIND_CHECK(optional, "{} has no section {}", name_, name);
return Section{nullptr, 0};
}
Section strtab() {
return strtab_;
}
private:
template <typename T>
T* load(size_t offset) {
UNWIND_CHECK(offset < n_bytes_, "out of range");
return (T*)(mem_ + offset);
}
int fd_;
char* mem_{nullptr};
size_t n_bytes_{0};
std::string name_;
Elf64_Ehdr* ehdr_;
Elf64_Shdr* shdr_;
Section strtab_ = {nullptr, 0};
};
} // namespace torch::unwind
```
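A hedged usage sketch of `MemFile` (Linux/x86-64 only; the library path is a placeholder and the program assumes the torch include tree):
```cpp
// Hypothetical caller of MemFile; the path below is a placeholder.
#include <torch/csrc/profiler/unwind/mem_file.h>

#include <cstdio>

int main() {
  try {
    // MemFile throws UnwindError if the file cannot be opened or is not a
    // 64-bit x86-64 ELF object.
    torch::unwind::MemFile file("/usr/lib/x86_64-linux-gnu/libc.so.6");
    torch::unwind::Section eh =
        file.getSection(".eh_frame_hdr", /*optional=*/true);
    std::printf(
        "mapped %zu bytes, .eh_frame_hdr size %zu\n", file.size(), eh.size);
  } catch (const torch::unwind::UnwindError& e) {
    std::printf("failed: %s\n", e.what());
  }
  return 0;
}
```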
|
==============================================================================================================================================
SOURCE CODE FILE: range_table.h
LINES: 2
SIZE: 2.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\range_table.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/profiler/unwind/unwind_error.h>
#include <algorithm>
#include <memory>
#include <optional>
#include <vector>
namespace torch::unwind {
template <typename T>
struct RangeTable {
RangeTable() {
// guarantee that lower_bound[-1] is always valid
addresses_.push_back(0);
payloads_.emplace_back(std::nullopt);
}
void add(uint64_t address, std::optional<T> payload, bool sorted) {
if (addresses_.back() > address) {
UNWIND_CHECK(!sorted, "expected addresses to be sorted");
sorted_ = false;
}
addresses_.push_back(address);
payloads_.emplace_back(std::move(payload));
}
std::optional<T> find(uint64_t address) {
maybeSort();
auto it = std::upper_bound(addresses_.begin(), addresses_.end(), address);
return payloads_.at(it - addresses_.begin() - 1);
}
void dump() {
for (size_t i = 0; i < addresses_.size(); i++) {
fmt::print("{} {:x}: {}\n", i, addresses_[i], payloads_[i] ? "" : "END");
}
}
size_t size() const {
return addresses_.size();
}
uint64_t back() {
maybeSort();
return addresses_.back();
}
private:
void maybeSort() {
if (sorted_) {
return;
}
std::vector<uint64_t> indices;
indices.reserve(addresses_.size());
for (size_t i = 0; i < addresses_.size(); i++) {
indices.push_back(i);
}
std::sort(indices.begin(), indices.end(), [&](uint64_t a, uint64_t b) {
return addresses_[a] < addresses_[b] ||
(addresses_[a] == addresses_[b] &&
bool(payloads_[a]) < bool(payloads_[b]));
});
std::vector<uint64_t> addresses;
std::vector<std::optional<T>> payloads;
addresses.reserve(addresses_.size());
payloads.reserve(addresses_.size());
for (auto i : indices) {
addresses.push_back(addresses_[i]);
payloads.push_back(payloads_[i]);
}
addresses_ = std::move(addresses);
payloads_ = std::move(payloads);
sorted_ = true;
}
bool sorted_ = true;
std::vector<uint64_t> addresses_;
std::vector<std::optional<T>> payloads_;
};
} // namespace torch::unwind
```
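A usage sketch of `RangeTable` (hypothetical caller, assuming the torch include path and fmt): a range is recorded as a start address with a payload followed by an end address with `std::nullopt`, and `find` returns the payload of the range containing the queried address.
```cpp
// Hypothetical caller of RangeTable; not part of the sources.
#include <torch/csrc/profiler/unwind/range_table.h>

#include <cstdio>
#include <optional>

int main() {
  torch::unwind::RangeTable<int> table;
  table.add(0x1000, 1, /*sorted=*/true); // [0x1000, 0x2000) -> 1
  table.add(0x2000, std::nullopt, /*sorted=*/true); // close the range
  if (auto payload = table.find(0x1500)) {
    std::printf("payload = %d\n", *payload); // prints 1
  }
  if (!table.find(0x3000)) {
    std::printf("0x3000 is not covered by any range\n");
  }
  return 0;
}
```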
|
===========================================================================================================================================
SOURCE CODE FILE: sections.h
LINES: 1
SIZE: 3.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\sections.h
ENCODING: utf-8
```h
#pragma once
#include <cxxabi.h>
#include <elf.h>
#include <torch/csrc/profiler/unwind/dwarf_enums.h>
#include <torch/csrc/profiler/unwind/dwarf_symbolize_enums.h>
#include <torch/csrc/profiler/unwind/mem_file.h>
#include <torch/csrc/profiler/unwind/range_table.h>
#include <torch/csrc/profiler/unwind/unwind_error.h>
#include <cstdint>
namespace torch::unwind {
static std::string demangle(const std::string& mangled_name) {
int status = 0;
char* realname =
abi::__cxa_demangle(mangled_name.c_str(), nullptr, nullptr, &status);
if (status == 0) {
std::string demangled_name(realname);
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
free(realname);
return demangled_name;
} else {
return mangled_name;
}
}
struct Sections {
Sections() = default;
void parse(const char* name) {
library_ = std::make_unique<MemFile>(name);
strtab = library_->getSection(".strtab", false);
symtab = library_->getSection(".symtab", true);
debug_info = library_->getSection(".debug_info", true);
if (debug_info.size > 0) {
debug_abbrev = library_->getSection(".debug_abbrev", false);
debug_str = library_->getSection(".debug_str", false);
debug_line = library_->getSection(".debug_line", false);
// dwarf 5
debug_line_str = library_->getSection(".debug_line_str", true);
debug_rnglists = library_->getSection(".debug_rnglists", true);
debug_addr = library_->getSection(".debug_addr", true);
// dwarf 4
debug_ranges = library_->getSection(".debug_ranges", true);
}
parseSymtab();
}
Section debug_info;
Section debug_abbrev;
Section debug_str;
Section debug_line;
Section debug_line_str;
Section debug_rnglists;
Section debug_ranges;
Section debug_addr;
Section symtab;
Section strtab;
const char* readString(CheckedLexer& data, uint64_t encoding, bool is_64bit) {
switch (encoding) {
case DW_FORM_string: {
return data.readCString();
}
case DW_FORM_strp: {
return debug_str.string(readSegmentOffset(data, is_64bit));
}
case DW_FORM_line_strp: {
return debug_line_str.string(readSegmentOffset(data, is_64bit));
}
default:
UNWIND_CHECK(false, "unsupported string encoding {:x}", encoding);
}
}
uint64_t readSegmentOffset(CheckedLexer& data, bool is_64bit) {
return is_64bit ? data.read<uint64_t>() : data.read<uint32_t>();
}
std::optional<uint64_t> findDebugInfoOffset(uint64_t address) {
return debug_info_offsets_.find(address);
}
size_t compilationUnitCount() {
return debug_info_offsets_.size() / 2;
}
void addDebugInfoRange(
uint64_t start,
uint64_t end,
uint64_t debug_info_offset) {
debug_info_offsets_.add(start, debug_info_offset, false);
debug_info_offsets_.add(end, std::nullopt, false);
}
std::optional<std::string> findSubprogramName(uint64_t address) {
if (auto e = symbol_table_.find(address)) {
return demangle(strtab.string(*e));
}
return std::nullopt;
}
private:
void parseSymtab() {
auto L = symtab.lexer(0);
char* end = symtab.data + symtab.size;
while (L.loc() < end) {
auto symbol = L.read<Elf64_Sym>();
if (symbol.st_shndx == SHN_UNDEF ||
ELF64_ST_TYPE(symbol.st_info) != STT_FUNC) {
continue;
}
symbol_table_.add(symbol.st_value, symbol.st_name, false);
symbol_table_.add(symbol.st_value + symbol.st_size, std::nullopt, false);
}
}
std::unique_ptr<MemFile> library_;
RangeTable<uint64_t> debug_info_offsets_;
RangeTable<uint64_t> symbol_table_;
};
} // namespace torch::unwind
```
|
=========================================================================================================================================
SOURCE CODE FILE: unwind.h
LINES: 1
SIZE: 1.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\unwind.h
ENCODING: utf-8
```h
#pragma once
#include <c10/macros/Export.h>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
namespace torch::unwind {
// gather current stack, relatively fast.
// gets faster once the cache of program counter locations is warm.
TORCH_API std::vector<void*> unwind();
struct Frame {
std::string filename;
std::string funcname;
uint64_t lineno;
};
enum class Mode { addr2line, fast, dladdr };
// note: symbolize is really slow
// it will launch an addr2line process that has to parse dwarf
// information from the libraries that frames point into.
// Callers should first batch up all the unique void* pointers
// across a number of unwind states and make a single call to
// symbolize.
TORCH_API std::vector<Frame> symbolize(
const std::vector<void*>& frames,
Mode mode);
// returns path to the library, and the offset of the addr inside the library
TORCH_API std::optional<std::pair<std::string, uint64_t>> libraryFor(
void* addr);
struct Stats {
size_t hits = 0;
size_t misses = 0;
size_t unsupported = 0;
size_t resets = 0;
};
Stats stats();
} // namespace torch::unwind
```
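A hedged usage sketch following the batching advice in the comments above (assumes linking against libtorch on Linux; `Mode::fast` is chosen purely as an example):
```cpp
// Hypothetical caller: capture the native stack once, then symbolize all
// frames in a single batched call.
#include <torch/csrc/profiler/unwind/unwind.h>

#include <cstdio>

void report() {
  std::vector<void*> pcs = torch::unwind::unwind();
  std::vector<torch::unwind::Frame> frames =
      torch::unwind::symbolize(pcs, torch::unwind::Mode::fast);
  for (const auto& f : frames) {
    std::printf(
        "%s:%llu %s\n",
        f.filename.c_str(),
        (unsigned long long)f.lineno,
        f.funcname.c_str());
  }
}
```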
|
===============================================================================================================================================
SOURCE CODE FILE: unwind_error.h
LINES: 1
SIZE: 0.91 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\unwind_error.h
ENCODING: utf-8
```h
#pragma once
#include <fmt/format.h>
#include <optional>
#include <stdexcept>
namespace torch::unwind {
struct UnwindError : public std::runtime_error {
using std::runtime_error::runtime_error;
};
#define UNWIND_CHECK(cond, fmtstring, ...) \
do { \
if (!(cond)) { \
throw unwind::UnwindError(fmt::format( \
"{}:{}: " fmtstring, __FILE__, __LINE__, ##__VA_ARGS__)); \
} \
} while (0)
// #define LOG_INFO(...) fmt::print(__VA_ARGS__)
#define LOG_INFO(...)
// #define PRINT_INST(...) LOG_INFO(__VA_ARGS__)
#define PRINT_INST(...)
// #define PRINT_LINE_TABLE(...) LOG_INFO(__VA_ARGS__)
#define PRINT_LINE_TABLE(...)
} // namespace torch::unwind
```
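A minimal sketch of `UNWIND_CHECK` used from code inside the `torch::unwind` namespace (the `parseHeader` function is hypothetical):
```cpp
#include <torch/csrc/profiler/unwind/unwind_error.h>

#include <cstddef>

namespace torch::unwind {
void parseHeader(size_t size) {
  // Throws UnwindError with file/line context when the condition is false.
  UNWIND_CHECK(size >= 4, "header too small: {} bytes", size);
  // ... safe to read at least 4 bytes here
}
} // namespace torch::unwind
```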
|
===========================================================================================================================================
SOURCE CODE FILE: unwinder.h
LINES: 1
SIZE: 2.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\unwind\unwinder.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/profiler/unwind/action.h>
#include <torch/csrc/profiler/unwind/unwind_error.h>
#include <cstdint>
#include <limits>
namespace torch::unwind {
struct UnwindState {
int64_t rip, rbp, rsp;
};
struct Unwinder {
Unwinder(Action rsp, Action rip, Action rbp)
: kind_(rip.kind == A_UNDEFINED ? END : STANDARD),
reg_(rsp.reg),
off_(rsp.data),
rip_off_(rip.data),
rbp_off_(
rbp.kind == A_UNDEFINED ? std::numeric_limits<int64_t>::max()
: rbp.data),
deref_(rsp.kind == A_REG_PLUS_DATA_DEREF) {
check(rsp.reg == D_RSP || rsp.reg == D_RBP);
check(rip.kind == A_UNDEFINED || rip.kind == A_LOAD_CFA_OFFSET);
if (rsp.kind == A_REG_PLUS_DATA) {
check(rbp.kind == A_LOAD_CFA_OFFSET || rbp.kind == A_UNDEFINED);
} else if (rsp.kind == A_REG_PLUS_DATA_DEREF) {
if (rbp.kind == A_REG_PLUS_DATA_DEREF) {
check(rbp.reg == rsp.reg);
rbp_off_ -= rsp.data;
} else {
check(rbp.kind == A_UNDEFINED);
}
} else {
check(false);
}
}
void check(bool cond) {
if (!cond) {
throw UnwindError("Unwinding actions do not follow supported patterns");
}
}
bool terminator() const {
return kind_ != STANDARD;
}
bool isUnknown() const {
return kind_ == UNKNOWN;
}
// unwinder representing some pattern unsupported in
// current implementation
static Unwinder unknown() {
return Unwinder();
}
UnwindState run(const UnwindState& cur) const {
UnwindState r = cur;
r.rsp = (reg_ == D_RSP ? cur.rsp : cur.rbp) + off_;
r.rbp = rbp_off_ == std::numeric_limits<int64_t>::max()
? cur.rbp
// NOLINTNEXTLINE(performance-no-int-to-ptr)
: *(int64_t*)(r.rsp + rbp_off_);
if (deref_) {
// NOLINTNEXTLINE(performance-no-int-to-ptr)
r.rsp = *(int64_t*)r.rsp;
}
// NOLINTNEXTLINE(performance-no-int-to-ptr)
r.rip = *(int64_t*)(r.rsp + rip_off_);
return r;
}
private:
Unwinder() : kind_(UNKNOWN), reg_(0), off_(0), rip_off_(0), rbp_off_(0) {}
enum Kind { STANDARD, END, UNKNOWN } kind_;
uint32_t reg_;
int64_t off_;
int64_t rip_off_;
int64_t rbp_off_;
bool deref_{false};
};
} // namespace torch::unwind
```
|
================================================================================================================================
SOURCE CODE FILE: util.h
LINES: 1
SIZE: 6.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\util.h
ENCODING: utf-8
```h
#pragma once
#include <cstddef>
#include <cstdint>
#include <list>
#include <string>
#include <unordered_map>
#include <variant>
#include <vector>
#include <ATen/record_function.h>
#include <c10/macros/Macros.h>
#include <c10/util/hash.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/frontend/source_range.h>
#include <optional>
// TODO: replace with pytorch/rfcs#43 when it is ready.
#define SOFT_ASSERT(cond, ...) \
[&]() -> bool { \
if (C10_UNLIKELY(!(cond))) { \
torch::profiler::impl::logSoftAssert( \
__func__, \
__FILE__, \
static_cast<uint32_t>(__LINE__), \
#cond, \
::c10::str(__VA_ARGS__)); \
if (torch::profiler::impl::softAssertRaises()) { \
TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__); \
} else { \
TORCH_WARN_ONCE(__VA_ARGS__); \
} \
return false; \
} \
return true; \
}()
namespace torch::profiler::impl {
TORCH_API bool softAssertRaises();
TORCH_API void setSoftAssertRaises(std::optional<bool> value);
TORCH_API void logSoftAssert(
const char* func,
const char* file,
uint32_t line,
const char* cond,
const char* args);
TORCH_API inline void logSoftAssert(
const char* func,
const char* file,
uint32_t line,
const char* cond,
::c10::detail::CompileTimeEmptyString args) {
logSoftAssert(func, file, line, cond, (const char*)args);
}
TORCH_API void logSoftAssert(
const char* func,
const char* file,
uint32_t line,
const char* cond,
const std::string& args);
using shape =
std::variant<std::vector<int64_t>, std::vector<std::vector<int64_t>>>;
constexpr int TENSOR_LIST_DISPLAY_LENGTH_LIMIT = 30;
std::string getNvtxStr(
const char* name,
int64_t sequence_nr,
const std::vector<std::vector<int64_t>>& shapes,
at::RecordFunctionHandle op_id = 0,
const std::list<std::pair<at::RecordFunctionHandle, int>>& input_op_ids =
{});
struct TORCH_API FileLineFunc {
std::string filename;
size_t line;
std::string funcname;
};
struct TORCH_API SaveNcclMetaConfig {
bool truncate;
bool introspectMetadata;
bool introspectInputs;
bool introspectOutputs;
// Default constructor with default values
SaveNcclMetaConfig()
: truncate(true),
introspectMetadata(true),
introspectInputs(false),
introspectOutputs(false) {}
SaveNcclMetaConfig(
bool truncate,
bool introspectMetadata,
bool introspectInputs,
bool introspectOutputs)
: truncate(truncate),
introspectMetadata(introspectMetadata),
introspectInputs(introspectInputs),
introspectOutputs(introspectOutputs) {}
};
TORCH_API std::vector<FileLineFunc> prepareCallstack(
const std::vector<jit::StackEntry>& cs);
TORCH_API std::vector<std::string> callstackStr(
const std::vector<FileLineFunc>& cs);
TORCH_API std::string stacksToStr(
const std::vector<std::string>& stacks,
const char* delim);
TORCH_API std::vector<std::vector<int64_t>> inputSizes(
const at::RecordFunction& fn,
const bool flatten_list_enabled = false);
TORCH_API std::string variantShapesToStr(const std::vector<shape>& shapes);
TORCH_API std::string shapesToStr(
const std::vector<std::vector<int64_t>>& shapes);
TORCH_API std::string strListToStr(const std::vector<std::string>& types);
TORCH_API std::string inputOpIdsToStr(
const std::list<std::pair<at::RecordFunctionHandle, int>>& input_op_ids);
TORCH_API std::string ivalueToStr(const c10::IValue& val, bool isString);
TORCH_API std::string ivalueListToStr(const std::vector<c10::IValue>& list);
TORCH_API std::vector<std::string> inputTypes(const at::RecordFunction& fn);
std::unordered_map<std::string, c10::IValue> TORCH_API
saveExtraArgs(const at::RecordFunction& fn);
std::unordered_map<std::string, std::string> TORCH_API saveNcclMeta(
const at::RecordFunction& fn,
const SaveNcclMetaConfig& config = SaveNcclMetaConfig());
int getTensorStartHint(const at::Tensor& t);
bool checkFunctionOutputsForLogging(const at::RecordFunction& fn);
bool checkFunctionInputsForLogging(const at::RecordFunction& fn);
std::pair<bool, std::variant<int, std::vector<int>>> findStartAddrForTensors(
const c10::IValue& val);
uint64_t TORCH_API computeFlops(
const std::string& op_name,
const std::unordered_map<std::string, c10::IValue>& extra_args);
std::string shapeToStr(const std::vector<int64_t>& shape);
template <typename T>
class TORCH_API GlobalStateManager {
public:
static GlobalStateManager& singleton() {
/* library-local */ static GlobalStateManager singleton_;
return singleton_;
}
static void push(std::shared_ptr<T>&& state) {
if (singleton().state_) {
LOG(WARNING) << "GlobalStatePtr already exists!";
} else {
singleton().state_ = std::move(state);
}
}
static auto* get() {
return singleton().state_.get();
}
static std::shared_ptr<T> pop() {
auto out = singleton().state_;
singleton().state_.reset();
return out;
}
private:
GlobalStateManager() = default;
std::shared_ptr<T> state_;
};
struct HashCombine {
template <typename T0, typename T1>
size_t operator()(const std::pair<T0, T1>& i) {
return c10::get_hash((*this)(i.first), (*this)(i.second));
}
template <typename... Args>
size_t operator()(const std::tuple<Args...>& i) {
return c10::get_hash(i);
}
template <typename T>
size_t operator()(const T& i) {
return c10::get_hash(i);
}
};
#ifdef USE_DISTRIBUTED
constexpr auto kCommsName = "Collective name";
constexpr auto kDtype = "dtype";
constexpr auto kInMsgNelems = "In msg nelems";
constexpr auto kOutMsgNelems = "Out msg nelems";
constexpr auto kInSplit = "In split size";
constexpr auto kOutSplit = "Out split size";
constexpr auto kGlobalRankStart = "Global rank start";
constexpr auto kGlobalRankStride = "Global rank stride";
constexpr auto kGroupSize = "Group size";
constexpr auto kProcessGroupName = "Process Group Name";
constexpr auto kProcessGroupDesc = "Process Group Description";
constexpr auto kGroupRanks = "Process Group Ranks";
constexpr auto kRank = "Rank";
constexpr auto kP2pSrc = "Src Rank";
constexpr auto kP2pDst = "Dst Rank";
constexpr auto kInTensorsStart = "Input Tensors start";
constexpr auto kOutTensorsStart = "Output Tensors start";
#endif // USE_DISTRIBUTED
} // namespace torch::profiler::impl
```
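A hedged usage sketch of `SOFT_ASSERT` (the `recordShape` function is an illustration, not part of the profiler, and assumes linking against libtorch): because the macro evaluates to a bool, callers can skip the offending record when soft asserts are configured to warn instead of raise.
```cpp
#include <torch/csrc/profiler/util.h>

// Hypothetical helper showing the SOFT_ASSERT calling convention.
bool recordShape(int64_t dim) {
  if (!SOFT_ASSERT(dim >= 0, "negative dimension ", dim)) {
    return false; // skip this record rather than crash the profiled program
  }
  // ... record the shape
  return true;
}
```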
|
=================================================================================================================================
SOURCE CODE FILE: python_dimname.h
LINES: 1
SIZE: 0.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\python_dimname.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Dimname.h>
#include <torch/csrc/python_headers.h>
at::Dimname THPDimname_parse(PyObject* obj);
bool THPUtils_checkDimname(PyObject* obj);
bool THPUtils_checkDimnameList(PyObject* obj);
```
|
=================================================================================================================================
SOURCE CODE FILE: python_headers.h
LINES: 1
SIZE: 0.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\python_headers.h
ENCODING: utf-8
```h
#pragma once
// workaround for https://github.com/python/cpython/pull/23326
#include <cmath>
#include <complex>
// workaround for Python 2 issue: https://bugs.python.org/issue17120
// NOTE: It looks like this affects Python 3 as well.
#pragma push_macro("_XOPEN_SOURCE")
#pragma push_macro("_POSIX_C_SOURCE")
#undef _XOPEN_SOURCE
#undef _POSIX_C_SOURCE
#include <Python.h>
#include <frameobject.h>
#include <structseq.h>
#pragma pop_macro("_XOPEN_SOURCE")
#pragma pop_macro("_POSIX_C_SOURCE")
#ifdef copysign
#undef copysign
#endif
#if PY_MAJOR_VERSION < 3
#error "Python 2 has reached end-of-life and is no longer supported by PyTorch."
#endif
```
|
================================================================================================================================
SOURCE CODE FILE: serialization.h
LINES: 1
SIZE: 0.69 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\serialization.h
ENCODING: utf-8
```h
#ifndef THP_SERIALIZATION_INC
#define THP_SERIALIZATION_INC
#include <c10/core/StorageImpl.h>
#include <c10/util/intrusive_ptr.h>
template <class io>
void doRead(io fildes, void* buf, size_t nbytes);
template <class io>
void doWrite(io fildes, void* buf, size_t nbytes);
// Note that this takes a mutable storage because it may pass through
// to at::from_blob.
template <class io>
void THPStorage_writeFileRaw(
c10::StorageImpl* self,
io fd,
bool save_size,
uint64_t element_size);
template <class io>
c10::intrusive_ptr<c10::StorageImpl> THPStorage_readFileRaw(
io fd,
c10::intrusive_ptr<c10::StorageImpl> storage,
uint64_t element_size);
#endif
```
|
=================================================================================================================================
SOURCE CODE FILE: library.h
LINES: 1
SIZE: 10.24 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\stable\library.h
ENCODING: utf-8
```h
// this file can only have stable stuff! Akin to shim.h
// but unlike shim.h, this file can contain header-only C++
// code for better UX.
#include <torch/csrc/inductor/aoti_torch/c/shim.h>
#include <optional>
// use anonymous namespace to avoid collisions between differing
// versions of this file that may be included by different sources
namespace {
namespace detail {
// utility functions to detect optional
template <typename V>
struct is_optional : std::false_type {};
template <typename V>
struct is_optional<std::optional<V>> : std::true_type {};
} // namespace detail
template <
typename T,
std::enable_if_t<!detail::is_optional<T>::value, bool> = true>
StableIValue from(T val) {
static_assert(
sizeof(T) <= sizeof(StableIValue),
"StableLibrary stack does not support parameter types larger than 64 bits.");
return *reinterpret_cast<StableIValue*>(&val);
}
// Specialization for std::nullopt_t
template <>
StableIValue from(std::nullopt_t val) {
return from(nullptr);
}
// Specialization for std::optional
// [Handling std::optional]
// When the schema is represented by an optional type, say int?, then we
// expect the custom extension representation to be a std::optional<int>
// (critically NOT int!). In order for all parameters to be stably parsed and
// handled by our dispatcher, we route custom extension parameters through
// boxed kernels, meaning that every value will make its way to be an IValue:
//
// custom extension value --(from)-> StableIValue --(to_ivalue)-> IValue
//
// When the custom extension value is a literal that can be trivially
// casted to StableIValue, e.g., an int, a float, a pointer, this route is
// ...trivial. The below specialization is for a case when the custom
// extension value would NOT fit within a StableIValue: a std::optional.
//
// If the std::optional has no value, it is treated as std::nullopt,
// whose StableIValue representation is from(nullptr). Otherwise, we:
// 1. unwrap the std::optional<T>
// 2. recursively convert its value of type T to a StableIValue
// 3. allocate heap space for said StableIValue
// 4. convert the resulting StableIValue* into a StableIValue
//
// Note that this allocates heap memory, which we expect to be cleaned
// up in the to_ivalue() function defined in shim_common.cpp. We
// purposefully hide this implementation detail from the user so that
// all the user needs to know is:
//
// The schema requests an optional (T?) so I must call `from` on a
// std::optional<T> or a std::nullopt.
template <typename T>
StableIValue from(std::optional<T> val) {
if (!val.has_value()) {
return from(std::nullopt);
}
StableIValue* heap_val = new StableIValue(from(val.value()));
return from(heap_val);
}
template <
typename T,
std::enable_if_t<!detail::is_optional<T>::value, bool> = true>
T to(StableIValue val) {
return *reinterpret_cast<T*>(&val);
}
template <
typename T,
std::enable_if_t<std::is_same_v<T, std::nullopt_t>, bool> = true>
T to(StableIValue val) {
// val should be equivalent to from(nullptr)
return std::nullopt;
}
// Specialization for std::optional, see [Handling std::optional] above
// as the semantic is the same but in reverse direction as we go from
// IValue --(from_ivalue)-> StableIValue --(to<T>)-> T in custom extension
template <
typename T,
std::enable_if_t<detail::is_optional<T>::value, bool> = true>
T to(StableIValue val) {
using V = typename T::value_type;
auto sivp = to<StableIValue*>(val);
// sivp is either nullptr or a pointer to a StableIValue
if (sivp == nullptr) {
return {};
}
auto inner_val = to<V>(*sivp);
// free the memory associated with StableIValue* sivp
delete sivp;
return std::make_optional(inner_val);
}
// end to helpers for converting between StableIValue and actual IValues
class StableLibrary final {
private:
TorchLibraryHandle lib_;
public:
enum class Kind {
DEF,
IMPL,
FRAGMENT,
};
// constructor
/// \private
///
/// Use STABLE_TORCH_LIBRARY or STABLE_TORCH_LIBRARY_IMPL() instead of using
/// these constructors directly
StableLibrary(
Kind kind,
const char* ns,
const char* k,
const char* file,
uint32_t line) {
if (kind == Kind::IMPL) {
aoti_torch_library_init_impl(ns, k, file, line, &lib_);
} else if (kind == Kind::DEF) {
aoti_torch_library_init_def(ns, file, line, &lib_);
} else { // kind == FRAGMENT
aoti_torch_library_init_fragment(ns, file, line, &lib_);
}
}
// do not permit copy
StableLibrary(const StableLibrary&) = delete;
StableLibrary& operator=(const StableLibrary&) = delete;
// do not permit move
StableLibrary(StableLibrary&& other) = delete;
StableLibrary& operator=(StableLibrary&& other) = delete;
~StableLibrary() {
aoti_torch_delete_library_object(lib_);
}
// corresponds to a limited, stable version of torch::library::impl()
// Inputs:
// name: the name of the function to implement
// fn: a boxed function with schema
// (StableIValue* stack, uint64_t num_inputs, uint64_t num_outputs) ->
// void
// fn should follow the calling convention of our boxed kernels that convert
// to IValues. fn will be called with a StableIValue* array of length
// max(num_inputs, num_outputs), where the first num_inputs entries are
// populated with inputs. fn is responsible for stealing the memory of the
// inputs, in effect "popping" them off the stack, and then populating the
// stack with StableIValue outputs. Concretely, fn should:
// 1. read StableIValue inputs from the given stack
// 2. convert the inputs to the proper types
// 3. call the function corresponding to name with the inputs
// 4. convert the outputs to StableIValues
// 5. populate the now empty stack with StableIValue outputs
// If the operation corresponding to name takes in 4 inputs and returns 2
// outputs, fn should expect stack to contain 4 StableIValues:
// [stable_arg1, stable_arg2, stable_arg3, stable_arg4]
  // At the end, fn should fill the stack with 2 StableIValues representing outputs:
// [stable_ret1, stable_ret2, -, -]
StableLibrary& impl(
const char* name,
void (*fn)(StableIValue*, uint64_t, uint64_t)) {
aoti_torch_library_impl(lib_, name, fn);
return *this;
}
// corresponds to a limited, stable version of torch::library::def()
StableLibrary& def(const char* schema) {
aoti_torch_library_def(lib_, schema);
return *this;
}
};
class StableTorchLibraryInit final {
private:
using InitFn = void(StableLibrary&);
StableLibrary lib_;
public:
StableTorchLibraryInit(
StableLibrary::Kind kind,
InitFn* fn,
const char* ns,
const char* k,
const char* file,
uint32_t line)
: lib_(kind, ns, k, file, line) {
fn(lib_);
}
};
} // namespace
// macros copied from c10/macros/Macros.h
#ifdef __COUNTER__
#define STABLE_UID __COUNTER__
#else
#define STABLE_UID __LINE__
#endif
#define STABLE_CONCATENATE_IMPL(s1, s2) s1##s2
#define STABLE_CONCATENATE(s1, s2) STABLE_CONCATENATE_IMPL(s1, s2)
// end of macros copied from c10/macros/Macros.h
#define STABLE_TORCH_LIBRARY_IMPL(ns, k, m) \
_STABLE_TORCH_LIBRARY_IMPL(ns, k, m, STABLE_UID)
#define _STABLE_TORCH_LIBRARY_IMPL(ns, k, m, uid) \
static void STABLE_CONCATENATE( \
STABLE_TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(StableLibrary&); \
static const StableTorchLibraryInit STABLE_CONCATENATE( \
STABLE_TORCH_LIBRARY_IMPL_static_init_##ns##_##k##_, uid)( \
StableLibrary::Kind::IMPL, \
&STABLE_CONCATENATE(STABLE_TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid), \
#ns, \
#k, \
__FILE__, \
__LINE__); \
void STABLE_CONCATENATE( \
STABLE_TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(StableLibrary & m)
#define STABLE_TORCH_LIBRARY(ns, m) \
static void STABLE_TORCH_LIBRARY_init_##ns(StableLibrary&); \
static const StableTorchLibraryInit STABLE_TORCH_LIBRARY_static_init_##ns( \
StableLibrary::Kind::DEF, \
&STABLE_TORCH_LIBRARY_init_##ns, \
#ns, \
nullptr, \
__FILE__, \
__LINE__); \
void STABLE_TORCH_LIBRARY_init_##ns(StableLibrary& m)
#define STABLE_TORCH_LIBRARY_FRAGMENT(ns, m) \
_STABLE_TORCH_LIBRARY_FRAGMENT(ns, m, STABLE_UID)
#define _STABLE_TORCH_LIBRARY_FRAGMENT(ns, m, uid) \
static void STABLE_CONCATENATE( \
STABLE_TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(StableLibrary&); \
static const StableTorchLibraryInit STABLE_CONCATENATE( \
STABLE_TORCH_LIBRARY_FRAGMENT_static_init_##ns##_, uid)( \
StableLibrary::Kind::FRAGMENT, \
&STABLE_CONCATENATE(STABLE_TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid), \
#ns, \
nullptr, \
__FILE__, \
__LINE__); \
void STABLE_CONCATENATE( \
STABLE_TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(StableLibrary & m)
```
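The boxed calling convention documented for `impl()` above is easiest to see in a short sketch. Only the macros and the `from`/`to` helpers come from this header; the operator name, schema string, and kernel body below are hypothetical.

```cpp
#include <torch/csrc/stable/library.h>

// Boxed kernel: one int input on the stack, one int output left in its place.
void boxed_add_one(
    StableIValue* stack,
    uint64_t /*num_inputs*/,
    uint64_t /*num_outputs*/) {
  int64_t x = to<int64_t>(stack[0]); // read and convert the single input
  stack[0] = from(x + 1);            // convert and write the single output
}

STABLE_TORCH_LIBRARY(myops, m) {
  m.def("add_one(int x) -> int"); // hypothetical schema
}

STABLE_TORCH_LIBRARY_IMPL(myops, CompositeExplicitAutograd, m) {
  m.impl("add_one", &boxed_add_one);
}
```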
|
=======================================================================================================================================
SOURCE CODE FILE: python_tensor.h
LINES: 1
SIZE: 1.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\tensor\python_tensor.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/Device.h>
#include <c10/core/DispatchKey.h>
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
namespace at {
class Tensor;
} // namespace at
namespace torch::tensors {
// Initializes the Python tensor type objects: torch.FloatTensor,
// torch.DoubleTensor, etc. and binds them in their containing modules.
TORCH_PYTHON_API void initialize_python_bindings();
// Same as set_default_tensor_type() but takes a PyObject*
TORCH_PYTHON_API void py_set_default_tensor_type(PyObject* type_obj);
// Same as py_set_default_tensor_type, but only changes the dtype (ScalarType).
TORCH_PYTHON_API void py_set_default_dtype(PyObject* dtype_obj);
// Gets the DispatchKey for the default tensor type.
//
// TODO: This is nuts! There is no reason to let the default tensor type id
// change. Probably only store ScalarType, as that's the only flex point
// we support.
TORCH_PYTHON_API c10::DispatchKey get_default_dispatch_key();
TORCH_PYTHON_API at::Device get_default_device();
// Gets the ScalarType for the default tensor type.
TORCH_PYTHON_API at::ScalarType get_default_scalar_type();
} // namespace torch::tensors
```
|
========================================================================================================================
SOURCE CODE FILE: utils.h
LINES: 1
SIZE: 9.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils.h
ENCODING: utf-8
```h
#ifndef THP_UTILS_H
#define THP_UTILS_H
#include <ATen/ATen.h>
#include <c10/util/Exception.h>
#include <torch/csrc/Storage.h>
#include <torch/csrc/THConcat.h>
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/python_compat.h>
#include <torch/csrc/utils/python_numbers.h>
#include <string>
#include <type_traits>
#include <vector>
#ifdef USE_CUDA
#include <c10/cuda/CUDAStream.h>
#endif
#define THPUtils_(NAME) TH_CONCAT_4(THP, Real, Utils_, NAME)
#define THPUtils_typename(obj) (Py_TYPE(obj)->tp_name)
#if defined(__GNUC__) || defined(__ICL) || defined(__clang__)
#define THP_EXPECT(x, y) (__builtin_expect((x), (y)))
#else
#define THP_EXPECT(x, y) (x)
#endif
#define THPUtils_checkReal_FLOAT(object) \
(PyFloat_Check(object) || PyLong_Check(object))
#define THPUtils_unpackReal_FLOAT(object) \
(PyFloat_Check(object) ? PyFloat_AsDouble(object) \
: PyLong_Check(object) \
? PyLong_AsLongLong(object) \
: (throw std::runtime_error("Could not parse real"), 0))
#define THPUtils_checkReal_INT(object) PyLong_Check(object)
#define THPUtils_unpackReal_INT(object) \
(PyLong_Check(object) \
? PyLong_AsLongLong(object) \
: (throw std::runtime_error("Could not parse real"), 0))
#define THPUtils_unpackReal_BOOL(object) \
(PyBool_Check(object) \
? object \
: (throw std::runtime_error("Could not parse real"), Py_False))
#define THPUtils_unpackReal_COMPLEX(object) \
(PyComplex_Check(object) \
? (c10::complex<double>( \
PyComplex_RealAsDouble(object), PyComplex_ImagAsDouble(object))) \
: PyFloat_Check(object) \
? (c10::complex<double>(PyFloat_AsDouble(object), 0)) \
: PyLong_Check(object) \
? (c10::complex<double>(PyLong_AsLongLong(object), 0)) \
: (throw std::runtime_error("Could not parse real"), \
c10::complex<double>(0, 0)))
#define THPUtils_checkReal_BOOL(object) PyBool_Check(object)
#define THPUtils_checkReal_COMPLEX(object) \
PyComplex_Check(object) || PyFloat_Check(object) || PyLong_Check(object) || \
PyInt_Check(object)
#define THPUtils_newReal_FLOAT(value) PyFloat_FromDouble(value)
#define THPUtils_newReal_INT(value) PyInt_FromLong(value)
#define THPUtils_newReal_BOOL(value) PyBool_FromLong(value)
#define THPUtils_newReal_COMPLEX(value) \
PyComplex_FromDoubles(value.real(), value.imag())
#define THPDoubleUtils_checkReal(object) THPUtils_checkReal_FLOAT(object)
#define THPDoubleUtils_unpackReal(object) \
(double)THPUtils_unpackReal_FLOAT(object)
#define THPDoubleUtils_newReal(value) THPUtils_newReal_FLOAT(value)
#define THPFloatUtils_checkReal(object) THPUtils_checkReal_FLOAT(object)
#define THPFloatUtils_unpackReal(object) \
(float)THPUtils_unpackReal_FLOAT(object)
#define THPFloatUtils_newReal(value) THPUtils_newReal_FLOAT(value)
#define THPHalfUtils_checkReal(object) THPUtils_checkReal_FLOAT(object)
#define THPHalfUtils_unpackReal(object) \
(at::Half) THPUtils_unpackReal_FLOAT(object)
#define THPHalfUtils_newReal(value) PyFloat_FromDouble(value)
#define THPHalfUtils_newAccreal(value) THPUtils_newReal_FLOAT(value)
#define THPComplexDoubleUtils_checkReal(object) \
THPUtils_checkReal_COMPLEX(object)
#define THPComplexDoubleUtils_unpackReal(object) \
THPUtils_unpackReal_COMPLEX(object)
#define THPComplexDoubleUtils_newReal(value) THPUtils_newReal_COMPLEX(value)
#define THPComplexFloatUtils_checkReal(object) \
THPUtils_checkReal_COMPLEX(object)
#define THPComplexFloatUtils_unpackReal(object) \
(c10::complex<float>)THPUtils_unpackReal_COMPLEX(object)
#define THPComplexFloatUtils_newReal(value) THPUtils_newReal_COMPLEX(value)
#define THPBFloat16Utils_checkReal(object) THPUtils_checkReal_FLOAT(object)
#define THPBFloat16Utils_unpackReal(object) \
(at::BFloat16) THPUtils_unpackReal_FLOAT(object)
#define THPBFloat16Utils_newReal(value) PyFloat_FromDouble(value)
#define THPBFloat16Utils_newAccreal(value) THPUtils_newReal_FLOAT(value)
#define THPBoolUtils_checkReal(object) THPUtils_checkReal_BOOL(object)
#define THPBoolUtils_unpackReal(object) THPUtils_unpackReal_BOOL(object)
#define THPBoolUtils_newReal(value) THPUtils_newReal_BOOL(value)
#define THPBoolUtils_checkAccreal(object) THPUtils_checkReal_BOOL(object)
#define THPBoolUtils_unpackAccreal(object) \
(int64_t) THPUtils_unpackReal_BOOL(object)
#define THPBoolUtils_newAccreal(value) THPUtils_newReal_BOOL(value)
#define THPLongUtils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPLongUtils_unpackReal(object) \
(int64_t) THPUtils_unpackReal_INT(object)
#define THPLongUtils_newReal(value) THPUtils_newReal_INT(value)
#define THPIntUtils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPIntUtils_unpackReal(object) (int)THPUtils_unpackReal_INT(object)
#define THPIntUtils_newReal(value) THPUtils_newReal_INT(value)
#define THPShortUtils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPShortUtils_unpackReal(object) (short)THPUtils_unpackReal_INT(object)
#define THPShortUtils_newReal(value) THPUtils_newReal_INT(value)
#define THPCharUtils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPCharUtils_unpackReal(object) (char)THPUtils_unpackReal_INT(object)
#define THPCharUtils_newReal(value) THPUtils_newReal_INT(value)
#define THPByteUtils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPByteUtils_unpackReal(object) \
(unsigned char)THPUtils_unpackReal_INT(object)
#define THPByteUtils_newReal(value) THPUtils_newReal_INT(value)
// quantized types
#define THPQUInt8Utils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPQUInt8Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object)
#define THPQUInt8Utils_newReal(value) THPUtils_newReal_INT(value)
#define THPQInt8Utils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPQInt8Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object)
#define THPQInt8Utils_newReal(value) THPUtils_newReal_INT(value)
#define THPQInt32Utils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPQInt32Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object)
#define THPQInt32Utils_newReal(value) THPUtils_newReal_INT(value)
#define THPQUInt4x2Utils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPQUInt4x2Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object)
#define THPQUInt4x2Utils_newReal(value) THPUtils_newReal_INT(value)
#define THPQUInt2x4Utils_checkReal(object) THPUtils_checkReal_INT(object)
#define THPQUInt2x4Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object)
#define THPQUInt2x4Utils_newReal(value) THPUtils_newReal_INT(value)
/*
From https://github.com/python/cpython/blob/v3.7.0/Modules/xxsubtype.c
If compiled as a shared library, some compilers don't allow addresses of
Python objects defined in other libraries to be used in static PyTypeObject
initializers. The DEFERRED_ADDRESS macro is used to tag the slots where such
addresses appear; the module init function that adds the PyTypeObject to the
module must fill in the tagged slots at runtime. The argument is for
documentation -- the macro ignores it.
*/
#define DEFERRED_ADDRESS(ADDR) nullptr
TORCH_PYTHON_API void THPUtils_setError(const char* format, ...);
TORCH_PYTHON_API void THPUtils_invalidArguments(
PyObject* given_args,
PyObject* given_kwargs,
const char* function_name,
size_t num_options,
...);
bool THPUtils_checkIntTuple(PyObject* arg);
std::vector<int> THPUtils_unpackIntTuple(PyObject* arg);
TORCH_PYTHON_API void THPUtils_addPyMethodDefs(
std::vector<PyMethodDef>& vector,
const PyMethodDef* methods);
int THPUtils_getCallable(PyObject* arg, PyObject** result);
typedef THPPointer<THPGenerator> THPGeneratorPtr;
typedef class THPPointer<THPStorage> THPStoragePtr;
TORCH_PYTHON_API std::vector<int64_t> THPUtils_unpackLongs(PyObject* arg);
PyObject* THPUtils_dispatchStateless(
PyObject* tensor,
const char* name,
PyObject* args,
PyObject* kwargs);
template <typename _real, typename = void>
struct mod_traits {};
template <typename _real>
struct mod_traits<_real, std::enable_if_t<std::is_floating_point_v<_real>>> {
static _real mod(_real a, _real b) {
return fmod(a, b);
}
};
template <typename _real>
struct mod_traits<_real, std::enable_if_t<std::is_integral_v<_real>>> {
static _real mod(_real a, _real b) {
return a % b;
}
};
void setBackCompatBroadcastWarn(bool warn);
bool getBackCompatBroadcastWarn();
void setBackCompatKeepdimWarn(bool warn);
bool getBackCompatKeepdimWarn();
bool maybeThrowBackCompatKeepdimWarn(char* func);
// NB: This is in torch/csrc/cuda/utils.cpp, for whatever reason
#ifdef USE_CUDA
std::vector<std::optional<at::cuda::CUDAStream>>
THPUtils_PySequence_to_CUDAStreamList(PyObject* obj);
#endif
void storage_fill(const at::Storage& self, uint8_t value);
void storage_set(const at::Storage& self, ptrdiff_t idx, uint8_t value);
uint8_t storage_get(const at::Storage& self, ptrdiff_t idx);
#endif
```
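The SFINAE dispatch in `mod_traits` above selects `fmod` for floating-point types and `operator%` for integral types. A minimal usage sketch, assuming the header is available to include:

```cpp
#include <torch/csrc/utils.h>
#include <cassert>

inline void mod_traits_example() {
  assert(mod_traits<int>::mod(7, 2) == 1);          // integral: uses a % b
  assert(mod_traits<double>::mod(7.5, 2.0) == 1.5); // floating point: uses fmod
}
```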
|
===================================================================================================================================
SOURCE CODE FILE: byte_order.h
LINES: 1
SIZE: 2.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\byte_order.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/BFloat16.h>
#include <c10/util/Float8_e4m3fn.h>
#include <c10/util/Float8_e4m3fnuz.h>
#include <c10/util/Float8_e5m2.h>
#include <c10/util/Float8_e5m2fnuz.h>
#include <c10/util/Half.h>
#include <torch/csrc/Export.h>
#include <cstddef>
#include <cstdint>
#ifdef __FreeBSD__
#include <sys/endian.h>
#include <sys/types.h>
#define thp_bswap16(x) bswap16(x)
#define thp_bswap32(x) bswap32(x)
#define thp_bswap64(x) bswap64(x)
#elif defined(__APPLE__)
#include <libkern/OSByteOrder.h>
#define thp_bswap16(x) OSSwapInt16(x)
#define thp_bswap32(x) OSSwapInt32(x)
#define thp_bswap64(x) OSSwapInt64(x)
#elif defined(__GNUC__) && !defined(__MINGW32__)
#include <byteswap.h>
#define thp_bswap16(x) bswap_16(x)
#define thp_bswap32(x) bswap_32(x)
#define thp_bswap64(x) bswap_64(x)
#elif defined _WIN32 || defined _WIN64
#define thp_bswap16(x) _byteswap_ushort(x)
#define thp_bswap32(x) _byteswap_ulong(x)
#define thp_bswap64(x) _byteswap_uint64(x)
#endif
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define to_be16(x) thp_bswap16(x)
#define from_be16(x) thp_bswap16(x)
#define to_be32(x) thp_bswap32(x)
#define from_be32(x) thp_bswap32(x)
#define to_be64(x) thp_bswap64(x)
#define from_be64(x) thp_bswap64(x)
#define to_le16(x) (x)
#define from_le16(x) (x)
#define to_le32(x) (x)
#define from_le32(x) (x)
#define to_le64(x) (x)
#define from_le64(x) (x)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define to_be16(x) (x)
#define from_be16(x) (x)
#define to_be32(x) (x)
#define from_be32(x) (x)
#define to_be64(x) (x)
#define from_be64(x) (x)
#define to_le16(x) thp_bswap16(x)
#define from_le16(x) thp_bswap16(x)
#define to_le32(x) thp_bswap32(x)
#define from_le32(x) thp_bswap32(x)
#define to_le64(x) thp_bswap64(x)
#define from_le64(x) thp_bswap64(x)
#else
#error Unexpected or undefined __BYTE_ORDER__
#endif
namespace torch::utils {
enum THPByteOrder { THP_LITTLE_ENDIAN = 0, THP_BIG_ENDIAN = 1 };
TORCH_API THPByteOrder THP_nativeByteOrder();
template <typename T, typename U>
TORCH_API void THP_decodeBuffer(T* dst, const uint8_t* src, U type, size_t len);
template <typename T>
TORCH_API void THP_encodeBuffer(
uint8_t* dst,
const T* src,
THPByteOrder order,
size_t len);
} // namespace torch::utils
```
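A minimal sketch of how the macro layer above behaves on a little-endian host: `to_be32()` byte-swaps while `to_le32()` is the identity (and vice versa on big-endian hosts). The helper function below is purely illustrative.

```cpp
#include <torch/csrc/utils/byte_order.h>
#include <cstdint>
#include <cstdio>

inline void byte_order_example() {
  uint32_t host = 0x11223344u;
  uint32_t be = to_be32(host); // 0x44332211 on little-endian hosts
  uint32_t le = to_le32(host); // unchanged on little-endian hosts
  std::printf(
      "native order: %s, be=%08x, le=%08x\n",
      torch::utils::THP_nativeByteOrder() == torch::utils::THP_LITTLE_ENDIAN
          ? "little"
          : "big",
      be,
      le);
}
```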
|
========================================================================================================================================
SOURCE CODE FILE: cpp_stacktraces.h
LINES: 1
SIZE: 0.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\cpp_stacktraces.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/profiler/unwind/unwind.h>
namespace torch {
TORCH_API bool get_cpp_stacktraces_enabled();
TORCH_API torch::unwind::Mode get_symbolize_mode();
} // namespace torch
```
|
=====================================================================================================================================
SOURCE CODE FILE: cuda_enabled.h
LINES: 1
SIZE: 0.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\cuda_enabled.h
ENCODING: utf-8
```h
#pragma once
namespace torch::utils {
inline constexpr bool cuda_enabled() {
#ifdef USE_CUDA
return true;
#else
return false;
#endif
}
} // namespace torch::utils
```
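Because `cuda_enabled()` is `constexpr`, callers can branch at compile time rather than guarding every call site with `#ifdef USE_CUDA`. A small hypothetical sketch:

```cpp
#include <torch/csrc/utils/cuda_enabled.h>

constexpr const char* default_accelerator_name() {
  return torch::utils::cuda_enabled() ? "cuda" : "cpu";
}
```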
|
=========================================================================================================================================
SOURCE CODE FILE: device_lazy_init.h
LINES: 1
SIZE: 2.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\device_lazy_init.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/TensorOptions.h>
#include <torch/csrc/Export.h>
// device_lazy_init() is always compiled, even for CPU-only builds.
namespace torch::utils {
/**
* This mechanism of lazy initialization is designed for each device backend.
* Currently, CUDA and XPU follow this design. This function `device_lazy_init`
 * MUST be called before you attempt to access any Type (CUDA or XPU) object
* from ATen, in any way. It guarantees that the device runtime status is lazily
* initialized when the first runtime API is requested.
*
* Here are some common ways that a device object may be retrieved:
* - You call getNonVariableType or getNonVariableTypeOpt
* - You call toBackend() on a Type
*
* It's important to do this correctly, because if you forget to add it you'll
 * get an oblique error message like "Cannot initialize CUDA without
* ATen_cuda library" or "Cannot initialize XPU without ATen_xpu library" if you
* try to use CUDA or XPU functionality from a CPU-only build, which is not good
* UX.
*/
TORCH_PYTHON_API void device_lazy_init(at::DeviceType device_type);
TORCH_PYTHON_API void set_requires_device_init(
at::DeviceType device_type,
bool value);
inline bool is_device_lazy_init_supported(at::DeviceType device_type) {
// Add more devices here to enable lazy initialization.
return (
device_type == at::DeviceType::CUDA ||
device_type == at::DeviceType::XPU ||
device_type == at::DeviceType::HPU ||
device_type == at::DeviceType::MTIA ||
device_type == at::DeviceType::PrivateUse1);
}
inline void maybe_initialize_device(at::Device& device) {
if (is_device_lazy_init_supported(device.type())) {
device_lazy_init(device.type());
}
}
inline void maybe_initialize_device(std::optional<at::Device>& device) {
if (!device.has_value()) {
return;
}
maybe_initialize_device(device.value());
}
inline void maybe_initialize_device(const at::TensorOptions& options) {
auto device = options.device();
maybe_initialize_device(device);
}
inline void maybe_initialize_device(
std::optional<at::DeviceType>& device_type) {
if (!device_type.has_value()) {
return;
}
maybe_initialize_device(device_type.value());
}
bool is_device_initialized(at::DeviceType device_type);
} // namespace torch::utils
```
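A minimal sketch of the intended call pattern: trigger the one-time runtime initialization before touching any device-specific state. The wrapper function is hypothetical; `maybe_initialize_device()` comes from the header above.

```cpp
#include <torch/csrc/utils/device_lazy_init.h>

void prepare_device(const at::TensorOptions& options) {
  // No-op for device types without lazy init; initializes the runtime once
  // for CUDA, XPU, HPU, MTIA, and PrivateUse1.
  torch::utils::maybe_initialize_device(options);
}
```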
|
===============================================================================================================================================
SOURCE CODE FILE: disable_torch_function.h
LINES: 1
SIZE: 1.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\disable_torch_function.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/DispatchKey.h>
#include <c10/core/impl/LocalDispatchKeySet.h>
#include <torch/csrc/python_headers.h>
namespace torch {
// Sometimes we don't want infinite recursion for subclasses, or we want a
// way to achieve the old behaviour.
// This is an internal utility, not exposed to users.
bool torch_function_enabled();
PyObject* disabled_torch_function_impl();
PyObject* disabled_torch_dispatch_impl();
void set_disabled_torch_function_impl(PyObject* value);
void set_disabled_torch_dispatch_impl(PyObject* value);
// Set ignore_mode to true if you're trying to collect overloaded arguments;
// using mode here will improperly cause you to add ALL objects to the
// overloaded list even if they don't actually have __torch_function__
bool check_has_torch_function(PyObject* obj, bool ignore_mode = false);
struct DisableTorchDispatch {
DisableTorchDispatch()
: guard_(c10::DispatchKeySet(
{c10::DispatchKey::Python, c10::DispatchKey::PreDispatch})),
guard_tls_snapshot_(c10::DispatchKey::PythonTLSSnapshot) {}
c10::impl::ExcludeDispatchKeyGuard guard_;
c10::impl::ExcludeDispatchKeyGuard guard_tls_snapshot_;
};
} // namespace torch
PyObject* THPModule_isEnabledTorchFunction(PyObject* self, PyObject* unused);
PyObject* THPModule_isAllDisabledTorchFunction(
PyObject* self,
PyObject* unused);
PyObject* THPModule_DisableTorchFunctionType();
PyObject* THPModule_DisableTorchFunctionSubclassType();
PyObject* THPModule_disable_torch_function(PyObject* self, PyObject* args);
PyObject* THPModule_disable_torch_dispatch(PyObject* self, PyObject* args);
PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg);
PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj);
PyObject* THPModule_has_torch_function_variadic(
PyObject*,
PyObject* const* args,
Py_ssize_t nargs);
```
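A sketch of the RAII usage implied by `DisableTorchDispatch` above: while the guard is alive, the Python and PreDispatch keys (plus PythonTLSSnapshot) are excluded, so tensor-subclass `__torch_dispatch__` handlers are not re-entered. The surrounding function is hypothetical.

```cpp
#include <torch/csrc/utils/disable_torch_function.h>

void call_aten_without_torch_dispatch() {
  torch::DisableTorchDispatch guard; // excluded keys restored on scope exit
  // ... call into ATen here; subclass dispatch is bypassed ...
}
```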
|
======================================================================================================================================================
SOURCE CODE FILE: generated_serialization_types.h
LINES: 1
SIZE: 116.76 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\generated_serialization_types.h
ENCODING: utf-8
```h
// @generated by update_schema.py
// checksum<<31c433c768b3f1bb61a5e8f4ceffc40c857bd80cf4fa0fc33fd03fa5ebb6c4d8>>
// clang-format off
#pragma once
#include <optional>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <variant>
#include <vector>
#include <nlohmann/json.hpp>
#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN
#define NLOHMANN_JSON_NAMESPACE_BEGIN namespace nlohmann {
#endif
#ifndef NLOHMANN_JSON_NAMESPACE_END
#define NLOHMANN_JSON_NAMESPACE_END }
#endif
// https://github.com/nlohmann/json/pull/2117
NLOHMANN_JSON_NAMESPACE_BEGIN
template <typename T>
struct adl_serializer<std::optional<T>> {
static void to_json(json& j, const std::optional<T>& opt) {
if (opt == std::nullopt) {
j = nullptr;
} else {
j = *opt; // this will call adl_serializer<T>::to_json which will
// find the free function to_json in T's namespace!
}
}
static void from_json(const json& j, std::optional<T>& opt) {
if (j.is_null()) {
opt = std::nullopt;
} else {
opt = j.template get<T>(); // same as above, but with
// adl_serializer<T>::from_json
}
}
};
NLOHMANN_JSON_NAMESPACE_END
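// Illustrative sketch, not emitted by update_schema.py: with the serializer
// specialization above, an empty std::optional maps to JSON null and back.
inline bool optional_json_roundtrip_example() {
  nlohmann::json j = std::optional<int>{}; // serializes to null
  auto opt = j.get<std::optional<int>>();  // deserializes back to nullopt
  return j.is_null() && !opt.has_value();
}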
namespace torch {
namespace _export {
template <typename T>
class ForwardRef {
static_assert(!std::is_reference_v<T>, "ForwardRef cannot be a reference type");
public:
ForwardRef(): ptr_(std::make_unique<T>()) {}
ForwardRef(ForwardRef<T>&&) = default;
ForwardRef(const ForwardRef<T>& other): ptr_(std::make_unique<T>(*other.ptr_)) {}
ForwardRef<T>& operator=(ForwardRef<T>&&) = default;
ForwardRef<T>& operator=(const ForwardRef<T>& other) {
ptr_ = std::make_unique<T>(*other.ptr_);
return *this;
}
const T& operator*() const {
return *ptr_;
}
const T* operator->() const {
return ptr_.get();
}
void emplace(T&& t) {
ptr_ = std::make_unique<T>(std::move(t));
}
private:
std::unique_ptr<T> ptr_;
};
template <typename T>
void to_json(nlohmann::json& j, const ForwardRef<T>& p) {
j = *p;
}
template <typename T>
void from_json(const nlohmann::json& j, ForwardRef<T>& p) {
p.emplace(j.template get<T>());
}
class F64 {
public:
double get() const {
return value_;
}
void set(double value) {
value_ = value;
}
private:
double value_;
};
inline void to_json(nlohmann::json& j, const F64& f) {
  if (f.get() == std::numeric_limits<double>::infinity()) {
    j = "Infinity";
  } else if (f.get() == -std::numeric_limits<double>::infinity()) {
    j = "-Infinity";
} else if (std::isnan(f.get())) {
j = "NaN";
} else {
j = f.get();
}
}
inline void from_json(const nlohmann::json& j, F64& f) {
if (j == "Infinity") {
f.set(std::numeric_limits<double>::infinity());
} else if (j == "-Infinity") {
f.set(-std::numeric_limits<double>::infinity());
} else if (j == "NaN") {
f.set(std::numeric_limits<double>::quiet_NaN());
} else {
f.set(j.get<double>());
}
}
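// Illustrative sketch, not emitted by update_schema.py: non-finite doubles
// round-trip through the string sentinels handled by the helpers above.
inline F64 f64_infinity_roundtrip_example() {
  F64 f;
  f.set(std::numeric_limits<double>::infinity());
  nlohmann::json j = f;  // serializes to the string "Infinity"
  return j.get<F64>();   // get() is +infinity again after parsing
}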
class AOTInductorModelPickleData;
class Argument;
class BufferMutationSpec;
class ConstantValue;
class CustomObjArgument;
class Device;
class ExportedProgram;
class ExternKernelNode;
class ExternKernelNodes;
class GradientToParameterSpec;
class GradientToUserInputSpec;
class Graph;
class GraphArgument;
class GraphModule;
class GraphSignature;
class InputSpec;
class InputToBufferSpec;
class InputToConstantInputSpec;
class InputToCustomObjSpec;
class InputToParameterSpec;
class InputToTensorConstantSpec;
class InputTokenSpec;
class LossOutputSpec;
class Model;
class ModuleCallEntry;
class ModuleCallSignature;
class NamedArgument;
class NamedTupleDef;
class Node;
class OptionalTensorArgument;
class OutputSpec;
class OutputTokenSpec;
class Program;
class RangeConstraint;
class SchemaVersion;
class SymBool;
class SymBoolArgument;
class SymExpr;
class SymExprHint;
class SymFloat;
class SymFloatArgument;
class SymInt;
class SymIntArgument;
class TensorArgument;
class TensorMeta;
class TokenArgument;
class UserInputMutationSpec;
class UserInputSpec;
class UserOutputSpec;
enum class ArgumentKind {
UNKNOWN = 0,
POSITIONAL = 1,
KEYWORD = 2,
};
inline std::string_view printEnum(const ArgumentKind& e) {
switch (e) {
case ArgumentKind::UNKNOWN: return "UNKNOWN";
case ArgumentKind::POSITIONAL: return "POSITIONAL";
case ArgumentKind::KEYWORD: return "KEYWORD";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, ArgumentKind& t) {
if (s == "UNKNOWN") { t = ArgumentKind::UNKNOWN; return; }
if (s == "POSITIONAL") { t = ArgumentKind::POSITIONAL; return; }
if (s == "KEYWORD") { t = ArgumentKind::KEYWORD; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
enum class Layout {
Unknown = 0,
SparseCoo = 1,
SparseCsr = 2,
SparseCsc = 3,
SparseBsr = 4,
SparseBsc = 5,
_mkldnn = 6,
Strided = 7,
};
inline std::string_view printEnum(const Layout& e) {
switch (e) {
case Layout::Unknown: return "Unknown";
case Layout::SparseCoo: return "SparseCoo";
case Layout::SparseCsr: return "SparseCsr";
case Layout::SparseCsc: return "SparseCsc";
case Layout::SparseBsr: return "SparseBsr";
case Layout::SparseBsc: return "SparseBsc";
case Layout::_mkldnn: return "_mkldnn";
case Layout::Strided: return "Strided";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, Layout& t) {
if (s == "Unknown") { t = Layout::Unknown; return; }
if (s == "SparseCoo") { t = Layout::SparseCoo; return; }
if (s == "SparseCsr") { t = Layout::SparseCsr; return; }
if (s == "SparseCsc") { t = Layout::SparseCsc; return; }
if (s == "SparseBsr") { t = Layout::SparseBsr; return; }
if (s == "SparseBsc") { t = Layout::SparseBsc; return; }
if (s == "_mkldnn") { t = Layout::_mkldnn; return; }
if (s == "Strided") { t = Layout::Strided; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
enum class MemoryFormat {
Unknown = 0,
ContiguousFormat = 1,
ChannelsLast = 2,
ChannelsLast3d = 3,
PreserveFormat = 4,
};
inline std::string_view printEnum(const MemoryFormat& e) {
switch (e) {
case MemoryFormat::Unknown: return "Unknown";
case MemoryFormat::ContiguousFormat: return "ContiguousFormat";
case MemoryFormat::ChannelsLast: return "ChannelsLast";
case MemoryFormat::ChannelsLast3d: return "ChannelsLast3d";
case MemoryFormat::PreserveFormat: return "PreserveFormat";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, MemoryFormat& t) {
if (s == "Unknown") { t = MemoryFormat::Unknown; return; }
if (s == "ContiguousFormat") { t = MemoryFormat::ContiguousFormat; return; }
if (s == "ChannelsLast") { t = MemoryFormat::ChannelsLast; return; }
if (s == "ChannelsLast3d") { t = MemoryFormat::ChannelsLast3d; return; }
if (s == "PreserveFormat") { t = MemoryFormat::PreserveFormat; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
enum class ScalarType {
UNKNOWN = 0,
BYTE = 1,
CHAR = 2,
SHORT = 3,
INT = 4,
LONG = 5,
HALF = 6,
FLOAT = 7,
DOUBLE = 8,
COMPLEXHALF = 9,
COMPLEXFLOAT = 10,
COMPLEXDOUBLE = 11,
BOOL = 12,
BFLOAT16 = 13,
UINT16 = 28,
FLOAT8E4M3FN = 29,
FLOAT8E5M2 = 30,
};
inline std::string_view printEnum(const ScalarType& e) {
switch (e) {
case ScalarType::UNKNOWN: return "UNKNOWN";
case ScalarType::BYTE: return "BYTE";
case ScalarType::CHAR: return "CHAR";
case ScalarType::SHORT: return "SHORT";
case ScalarType::INT: return "INT";
case ScalarType::LONG: return "LONG";
case ScalarType::HALF: return "HALF";
case ScalarType::FLOAT: return "FLOAT";
case ScalarType::DOUBLE: return "DOUBLE";
case ScalarType::COMPLEXHALF: return "COMPLEXHALF";
case ScalarType::COMPLEXFLOAT: return "COMPLEXFLOAT";
case ScalarType::COMPLEXDOUBLE: return "COMPLEXDOUBLE";
case ScalarType::BOOL: return "BOOL";
case ScalarType::BFLOAT16: return "BFLOAT16";
case ScalarType::UINT16: return "UINT16";
case ScalarType::FLOAT8E4M3FN: return "FLOAT8E4M3FN";
case ScalarType::FLOAT8E5M2: return "FLOAT8E5M2";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, ScalarType& t) {
if (s == "UNKNOWN") { t = ScalarType::UNKNOWN; return; }
if (s == "BYTE") { t = ScalarType::BYTE; return; }
if (s == "CHAR") { t = ScalarType::CHAR; return; }
if (s == "SHORT") { t = ScalarType::SHORT; return; }
if (s == "INT") { t = ScalarType::INT; return; }
if (s == "LONG") { t = ScalarType::LONG; return; }
if (s == "HALF") { t = ScalarType::HALF; return; }
if (s == "FLOAT") { t = ScalarType::FLOAT; return; }
if (s == "DOUBLE") { t = ScalarType::DOUBLE; return; }
if (s == "COMPLEXHALF") { t = ScalarType::COMPLEXHALF; return; }
if (s == "COMPLEXFLOAT") { t = ScalarType::COMPLEXFLOAT; return; }
if (s == "COMPLEXDOUBLE") { t = ScalarType::COMPLEXDOUBLE; return; }
if (s == "BOOL") { t = ScalarType::BOOL; return; }
if (s == "BFLOAT16") { t = ScalarType::BFLOAT16; return; }
if (s == "UINT16") { t = ScalarType::UINT16; return; }
if (s == "FLOAT8E4M3FN") { t = ScalarType::FLOAT8E4M3FN; return; }
if (s == "FLOAT8E5M2") { t = ScalarType::FLOAT8E5M2; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class Device {
private:
std::string type;
std::optional<int64_t> index = std::nullopt;
public:
const std::string& get_type() const {
return type;
}
void set_type(std::string def) {
type = std::move(def);
}
const std::optional<int64_t>& get_index() const {
return index;
}
void set_index(std::optional<int64_t> def) {
index = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const Device& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, Device& nlohmann_json_t);
};
class SymExprHint {
struct Void {};
public:
enum class Tag {
AS_INT, AS_BOOL, AS_FLOAT
};
private:
std::variant<Void, int64_t, bool, F64> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const int64_t& get_as_int() const {
return std::get<1>(variant_);
}
void set_as_int(int64_t def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_INT;
}
const bool& get_as_bool() const {
return std::get<2>(variant_);
}
void set_as_bool(bool def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_BOOL;
}
const F64& get_as_float() const {
return std::get<3>(variant_);
}
void set_as_float(F64 def) {
variant_.emplace<3>(std::move(def));
tag_ = Tag::AS_FLOAT;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const SymExprHint& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_INT) {
nlohmann_json_j["as_int"] = nlohmann_json_t.get_as_int();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_BOOL) {
nlohmann_json_j["as_bool"] = nlohmann_json_t.get_as_bool();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_FLOAT) {
nlohmann_json_j["as_float"] = nlohmann_json_t.get_as_float();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, SymExprHint& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_int")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_int").template get<int64_t>());
nlohmann_json_t.tag_ = Tag::AS_INT;
return;
}
if (nlohmann_json_j.contains("as_bool")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_bool").template get<bool>());
nlohmann_json_t.tag_ = Tag::AS_BOOL;
return;
}
if (nlohmann_json_j.contains("as_float")) {
nlohmann_json_t.variant_.emplace<3>(nlohmann_json_j.at("as_float").template get<F64>());
nlohmann_json_t.tag_ = Tag::AS_FLOAT;
return;
}
}
};
inline std::string_view printEnum(const SymExprHint::Tag& e) {
switch (e) {
case SymExprHint::Tag::AS_INT: return "AS_INT";
case SymExprHint::Tag::AS_BOOL: return "AS_BOOL";
case SymExprHint::Tag::AS_FLOAT: return "AS_FLOAT";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, SymExprHint::Tag& t) {
if (s == "AS_INT") { t = SymExprHint::Tag::AS_INT; return; }
if (s == "AS_BOOL") { t = SymExprHint::Tag::AS_BOOL; return; }
if (s == "AS_FLOAT") { t = SymExprHint::Tag::AS_FLOAT; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class SymExpr {
private:
std::string expr_str;
std::optional<SymExprHint> hint = std::nullopt;
public:
const std::string& get_expr_str() const {
return expr_str;
}
void set_expr_str(std::string def) {
expr_str = std::move(def);
}
const std::optional<SymExprHint>& get_hint() const {
return hint;
}
void set_hint(std::optional<SymExprHint> def) {
hint = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const SymExpr& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, SymExpr& nlohmann_json_t);
};
class SymInt {
struct Void {};
public:
enum class Tag {
AS_EXPR, AS_INT
};
private:
std::variant<Void, SymExpr, int64_t> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const SymExpr& get_as_expr() const {
return std::get<1>(variant_);
}
void set_as_expr(SymExpr def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_EXPR;
}
const int64_t& get_as_int() const {
return std::get<2>(variant_);
}
void set_as_int(int64_t def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_INT;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const SymInt& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_EXPR) {
nlohmann_json_j["as_expr"] = nlohmann_json_t.get_as_expr();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_INT) {
nlohmann_json_j["as_int"] = nlohmann_json_t.get_as_int();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, SymInt& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_expr")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_expr").template get<SymExpr>());
nlohmann_json_t.tag_ = Tag::AS_EXPR;
return;
}
if (nlohmann_json_j.contains("as_int")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_int").template get<int64_t>());
nlohmann_json_t.tag_ = Tag::AS_INT;
return;
}
}
};
inline std::string_view printEnum(const SymInt::Tag& e) {
switch (e) {
case SymInt::Tag::AS_EXPR: return "AS_EXPR";
case SymInt::Tag::AS_INT: return "AS_INT";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, SymInt::Tag& t) {
if (s == "AS_EXPR") { t = SymInt::Tag::AS_EXPR; return; }
if (s == "AS_INT") { t = SymInt::Tag::AS_INT; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
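// Illustrative sketch, not emitted by update_schema.py: the tagged union
// above round-trips through JSON via its friend to_json/from_json.
inline SymInt symint_roundtrip_example() {
  SymInt s;
  s.set_as_int(42);
  nlohmann::json j = s;   // serializes to {"as_int": 42}
  return j.get<SymInt>(); // tag() == SymInt::Tag::AS_INT, value 42
}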
class SymFloat {
struct Void {};
public:
enum class Tag {
AS_EXPR, AS_FLOAT
};
private:
std::variant<Void, SymExpr, F64> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const SymExpr& get_as_expr() const {
return std::get<1>(variant_);
}
void set_as_expr(SymExpr def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_EXPR;
}
const F64& get_as_float() const {
return std::get<2>(variant_);
}
void set_as_float(F64 def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_FLOAT;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const SymFloat& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_EXPR) {
nlohmann_json_j["as_expr"] = nlohmann_json_t.get_as_expr();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_FLOAT) {
nlohmann_json_j["as_float"] = nlohmann_json_t.get_as_float();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, SymFloat& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_expr")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_expr").template get<SymExpr>());
nlohmann_json_t.tag_ = Tag::AS_EXPR;
return;
}
if (nlohmann_json_j.contains("as_float")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_float").template get<F64>());
nlohmann_json_t.tag_ = Tag::AS_FLOAT;
return;
}
}
};
inline std::string_view printEnum(const SymFloat::Tag& e) {
switch (e) {
case SymFloat::Tag::AS_EXPR: return "AS_EXPR";
case SymFloat::Tag::AS_FLOAT: return "AS_FLOAT";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, SymFloat::Tag& t) {
if (s == "AS_EXPR") { t = SymFloat::Tag::AS_EXPR; return; }
if (s == "AS_FLOAT") { t = SymFloat::Tag::AS_FLOAT; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class SymBool {
struct Void {};
public:
enum class Tag {
AS_EXPR, AS_BOOL
};
private:
std::variant<Void, SymExpr, bool> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const SymExpr& get_as_expr() const {
return std::get<1>(variant_);
}
void set_as_expr(SymExpr def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_EXPR;
}
const bool& get_as_bool() const {
return std::get<2>(variant_);
}
void set_as_bool(bool def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_BOOL;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const SymBool& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_EXPR) {
nlohmann_json_j["as_expr"] = nlohmann_json_t.get_as_expr();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_BOOL) {
nlohmann_json_j["as_bool"] = nlohmann_json_t.get_as_bool();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, SymBool& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_expr")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_expr").template get<SymExpr>());
nlohmann_json_t.tag_ = Tag::AS_EXPR;
return;
}
if (nlohmann_json_j.contains("as_bool")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_bool").template get<bool>());
nlohmann_json_t.tag_ = Tag::AS_BOOL;
return;
}
}
};
inline std::string_view printEnum(const SymBool::Tag& e) {
switch (e) {
case SymBool::Tag::AS_EXPR: return "AS_EXPR";
case SymBool::Tag::AS_BOOL: return "AS_BOOL";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, SymBool::Tag& t) {
if (s == "AS_EXPR") { t = SymBool::Tag::AS_EXPR; return; }
if (s == "AS_BOOL") { t = SymBool::Tag::AS_BOOL; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class TensorMeta {
private:
int64_t dtype;
std::vector<SymInt> sizes;
bool requires_grad;
Device device;
std::vector<SymInt> strides;
SymInt storage_offset;
int64_t layout;
public:
ScalarType get_dtype() const {
return static_cast<ScalarType>(dtype);
}
void set_dtype(ScalarType def) {
dtype = static_cast<int64_t>(def);
}
const std::vector<SymInt>& get_sizes() const {
return sizes;
}
void set_sizes(std::vector<SymInt> def) {
sizes = std::move(def);
}
const bool& get_requires_grad() const {
return requires_grad;
}
void set_requires_grad(bool def) {
requires_grad = std::move(def);
}
const Device& get_device() const {
return device;
}
void set_device(Device def) {
device = std::move(def);
}
const std::vector<SymInt>& get_strides() const {
return strides;
}
void set_strides(std::vector<SymInt> def) {
strides = std::move(def);
}
const SymInt& get_storage_offset() const {
return storage_offset;
}
void set_storage_offset(SymInt def) {
storage_offset = std::move(def);
}
Layout get_layout() const {
return static_cast<Layout>(layout);
}
void set_layout(Layout def) {
layout = static_cast<int64_t>(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const TensorMeta& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, TensorMeta& nlohmann_json_t);
};
class SymIntArgument {
struct Void {};
public:
enum class Tag {
AS_NAME, AS_INT
};
private:
std::variant<Void, std::string, int64_t> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const std::string& get_as_name() const {
return std::get<1>(variant_);
}
void set_as_name(std::string def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_NAME;
}
const int64_t& get_as_int() const {
return std::get<2>(variant_);
}
void set_as_int(int64_t def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_INT;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const SymIntArgument& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_NAME) {
nlohmann_json_j["as_name"] = nlohmann_json_t.get_as_name();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_INT) {
nlohmann_json_j["as_int"] = nlohmann_json_t.get_as_int();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, SymIntArgument& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_name")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_name").template get<std::string>());
nlohmann_json_t.tag_ = Tag::AS_NAME;
return;
}
if (nlohmann_json_j.contains("as_int")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_int").template get<int64_t>());
nlohmann_json_t.tag_ = Tag::AS_INT;
return;
}
}
};
inline std::string_view printEnum(const SymIntArgument::Tag& e) {
switch (e) {
case SymIntArgument::Tag::AS_NAME: return "AS_NAME";
case SymIntArgument::Tag::AS_INT: return "AS_INT";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, SymIntArgument::Tag& t) {
if (s == "AS_NAME") { t = SymIntArgument::Tag::AS_NAME; return; }
if (s == "AS_INT") { t = SymIntArgument::Tag::AS_INT; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class SymFloatArgument {
struct Void {};
public:
enum class Tag {
AS_NAME, AS_FLOAT
};
private:
std::variant<Void, std::string, F64> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const std::string& get_as_name() const {
return std::get<1>(variant_);
}
void set_as_name(std::string def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_NAME;
}
const F64& get_as_float() const {
return std::get<2>(variant_);
}
void set_as_float(F64 def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_FLOAT;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const SymFloatArgument& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_NAME) {
nlohmann_json_j["as_name"] = nlohmann_json_t.get_as_name();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_FLOAT) {
nlohmann_json_j["as_float"] = nlohmann_json_t.get_as_float();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, SymFloatArgument& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_name")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_name").template get<std::string>());
nlohmann_json_t.tag_ = Tag::AS_NAME;
return;
}
if (nlohmann_json_j.contains("as_float")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_float").template get<F64>());
nlohmann_json_t.tag_ = Tag::AS_FLOAT;
return;
}
}
};
inline std::string_view printEnum(const SymFloatArgument::Tag& e) {
switch (e) {
case SymFloatArgument::Tag::AS_NAME: return "AS_NAME";
case SymFloatArgument::Tag::AS_FLOAT: return "AS_FLOAT";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, SymFloatArgument::Tag& t) {
if (s == "AS_NAME") { t = SymFloatArgument::Tag::AS_NAME; return; }
if (s == "AS_FLOAT") { t = SymFloatArgument::Tag::AS_FLOAT; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class SymBoolArgument {
struct Void {};
public:
enum class Tag {
AS_NAME, AS_BOOL
};
private:
std::variant<Void, std::string, bool> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const std::string& get_as_name() const {
return std::get<1>(variant_);
}
void set_as_name(std::string def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_NAME;
}
const bool& get_as_bool() const {
return std::get<2>(variant_);
}
void set_as_bool(bool def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_BOOL;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const SymBoolArgument& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_NAME) {
nlohmann_json_j["as_name"] = nlohmann_json_t.get_as_name();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_BOOL) {
nlohmann_json_j["as_bool"] = nlohmann_json_t.get_as_bool();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, SymBoolArgument& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_name")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_name").template get<std::string>());
nlohmann_json_t.tag_ = Tag::AS_NAME;
return;
}
if (nlohmann_json_j.contains("as_bool")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_bool").template get<bool>());
nlohmann_json_t.tag_ = Tag::AS_BOOL;
return;
}
}
};
inline std::string_view printEnum(const SymBoolArgument::Tag& e) {
switch (e) {
case SymBoolArgument::Tag::AS_NAME: return "AS_NAME";
case SymBoolArgument::Tag::AS_BOOL: return "AS_BOOL";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, SymBoolArgument::Tag& t) {
if (s == "AS_NAME") { t = SymBoolArgument::Tag::AS_NAME; return; }
if (s == "AS_BOOL") { t = SymBoolArgument::Tag::AS_BOOL; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class TensorArgument {
private:
std::string name;
public:
const std::string& get_name() const {
return name;
}
void set_name(std::string def) {
name = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const TensorArgument& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, TensorArgument& nlohmann_json_t);
};
class TokenArgument {
private:
std::string name;
public:
const std::string& get_name() const {
return name;
}
void set_name(std::string def) {
name = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const TokenArgument& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, TokenArgument& nlohmann_json_t);
};
class OptionalTensorArgument {
struct Void {};
public:
enum class Tag {
AS_TENSOR, AS_NONE
};
private:
std::variant<Void, TensorArgument, bool> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const TensorArgument& get_as_tensor() const {
return std::get<1>(variant_);
}
void set_as_tensor(TensorArgument def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_TENSOR;
}
const bool& get_as_none() const {
return std::get<2>(variant_);
}
void set_as_none(bool def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_NONE;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const OptionalTensorArgument& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_TENSOR) {
nlohmann_json_j["as_tensor"] = nlohmann_json_t.get_as_tensor();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_NONE) {
nlohmann_json_j["as_none"] = nlohmann_json_t.get_as_none();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, OptionalTensorArgument& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_tensor")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_tensor").template get<TensorArgument>());
nlohmann_json_t.tag_ = Tag::AS_TENSOR;
return;
}
if (nlohmann_json_j.contains("as_none")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_none").template get<bool>());
nlohmann_json_t.tag_ = Tag::AS_NONE;
return;
}
}
};
inline std::string_view printEnum(const OptionalTensorArgument::Tag& e) {
switch (e) {
case OptionalTensorArgument::Tag::AS_TENSOR: return "AS_TENSOR";
case OptionalTensorArgument::Tag::AS_NONE: return "AS_NONE";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, OptionalTensorArgument::Tag& t) {
if (s == "AS_TENSOR") { t = OptionalTensorArgument::Tag::AS_TENSOR; return; }
if (s == "AS_NONE") { t = OptionalTensorArgument::Tag::AS_NONE; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class GraphArgument {
private:
std::string name;
ForwardRef<Graph> graph;
public:
const std::string& get_name() const {
return name;
}
void set_name(std::string def) {
name = std::move(def);
}
const ForwardRef<Graph>& get_graph() const {
return graph;
}
void set_graph(ForwardRef<Graph> def) {
graph = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const GraphArgument& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, GraphArgument& nlohmann_json_t);
};
class CustomObjArgument {
private:
std::string name;
std::string class_fqn;
public:
const std::string& get_name() const {
return name;
}
void set_name(std::string def) {
name = std::move(def);
}
const std::string& get_class_fqn() const {
return class_fqn;
}
void set_class_fqn(std::string def) {
class_fqn = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const CustomObjArgument& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, CustomObjArgument& nlohmann_json_t);
};
class Argument {
struct Void {};
public:
enum class Tag {
AS_NONE, AS_TENSOR, AS_TENSORS, AS_INT, AS_INTS, AS_FLOAT, AS_FLOATS, AS_STRING, AS_STRINGS, AS_SYM_INT, AS_SYM_INTS, AS_SCALAR_TYPE, AS_MEMORY_FORMAT, AS_LAYOUT, AS_DEVICE, AS_BOOL, AS_BOOLS, AS_SYM_BOOL, AS_SYM_BOOLS, AS_GRAPH, AS_OPTIONAL_TENSORS, AS_CUSTOM_OBJ, AS_OPERATOR, AS_SYM_FLOAT, AS_SYM_FLOATS
};
private:
std::variant<Void, bool, TensorArgument, std::vector<TensorArgument>, int64_t, std::vector<int64_t>, F64, std::vector<F64>, std::string, std::vector<std::string>, SymIntArgument, std::vector<SymIntArgument>, ScalarType, MemoryFormat, Layout, Device, bool, std::vector<bool>, SymBoolArgument, std::vector<SymBoolArgument>, GraphArgument, std::vector<OptionalTensorArgument>, CustomObjArgument, std::string, SymFloatArgument, std::vector<SymFloatArgument>> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const bool& get_as_none() const {
return std::get<1>(variant_);
}
void set_as_none(bool def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_NONE;
}
const TensorArgument& get_as_tensor() const {
return std::get<2>(variant_);
}
void set_as_tensor(TensorArgument def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_TENSOR;
}
const std::vector<TensorArgument>& get_as_tensors() const {
return std::get<3>(variant_);
}
void set_as_tensors(std::vector<TensorArgument> def) {
variant_.emplace<3>(std::move(def));
tag_ = Tag::AS_TENSORS;
}
const int64_t& get_as_int() const {
return std::get<4>(variant_);
}
void set_as_int(int64_t def) {
variant_.emplace<4>(std::move(def));
tag_ = Tag::AS_INT;
}
const std::vector<int64_t>& get_as_ints() const {
return std::get<5>(variant_);
}
void set_as_ints(std::vector<int64_t> def) {
variant_.emplace<5>(std::move(def));
tag_ = Tag::AS_INTS;
}
const F64& get_as_float() const {
return std::get<6>(variant_);
}
void set_as_float(F64 def) {
variant_.emplace<6>(std::move(def));
tag_ = Tag::AS_FLOAT;
}
const std::vector<F64>& get_as_floats() const {
return std::get<7>(variant_);
}
void set_as_floats(std::vector<F64> def) {
variant_.emplace<7>(std::move(def));
tag_ = Tag::AS_FLOATS;
}
const std::string& get_as_string() const {
return std::get<8>(variant_);
}
void set_as_string(std::string def) {
variant_.emplace<8>(std::move(def));
tag_ = Tag::AS_STRING;
}
const std::vector<std::string>& get_as_strings() const {
return std::get<9>(variant_);
}
void set_as_strings(std::vector<std::string> def) {
variant_.emplace<9>(std::move(def));
tag_ = Tag::AS_STRINGS;
}
const SymIntArgument& get_as_sym_int() const {
return std::get<10>(variant_);
}
void set_as_sym_int(SymIntArgument def) {
variant_.emplace<10>(std::move(def));
tag_ = Tag::AS_SYM_INT;
}
const std::vector<SymIntArgument>& get_as_sym_ints() const {
return std::get<11>(variant_);
}
void set_as_sym_ints(std::vector<SymIntArgument> def) {
variant_.emplace<11>(std::move(def));
tag_ = Tag::AS_SYM_INTS;
}
const ScalarType& get_as_scalar_type() const {
return std::get<12>(variant_);
}
void set_as_scalar_type(ScalarType def) {
variant_.emplace<12>(std::move(def));
tag_ = Tag::AS_SCALAR_TYPE;
}
const MemoryFormat& get_as_memory_format() const {
return std::get<13>(variant_);
}
void set_as_memory_format(MemoryFormat def) {
variant_.emplace<13>(std::move(def));
tag_ = Tag::AS_MEMORY_FORMAT;
}
const Layout& get_as_layout() const {
return std::get<14>(variant_);
}
void set_as_layout(Layout def) {
variant_.emplace<14>(std::move(def));
tag_ = Tag::AS_LAYOUT;
}
const Device& get_as_device() const {
return std::get<15>(variant_);
}
void set_as_device(Device def) {
variant_.emplace<15>(std::move(def));
tag_ = Tag::AS_DEVICE;
}
const bool& get_as_bool() const {
return std::get<16>(variant_);
}
void set_as_bool(bool def) {
variant_.emplace<16>(std::move(def));
tag_ = Tag::AS_BOOL;
}
const std::vector<bool>& get_as_bools() const {
return std::get<17>(variant_);
}
void set_as_bools(std::vector<bool> def) {
variant_.emplace<17>(std::move(def));
tag_ = Tag::AS_BOOLS;
}
const SymBoolArgument& get_as_sym_bool() const {
return std::get<18>(variant_);
}
void set_as_sym_bool(SymBoolArgument def) {
variant_.emplace<18>(std::move(def));
tag_ = Tag::AS_SYM_BOOL;
}
const std::vector<SymBoolArgument>& get_as_sym_bools() const {
return std::get<19>(variant_);
}
void set_as_sym_bools(std::vector<SymBoolArgument> def) {
variant_.emplace<19>(std::move(def));
tag_ = Tag::AS_SYM_BOOLS;
}
const GraphArgument& get_as_graph() const {
return std::get<20>(variant_);
}
void set_as_graph(GraphArgument def) {
variant_.emplace<20>(std::move(def));
tag_ = Tag::AS_GRAPH;
}
const std::vector<OptionalTensorArgument>& get_as_optional_tensors() const {
return std::get<21>(variant_);
}
void set_as_optional_tensors(std::vector<OptionalTensorArgument> def) {
variant_.emplace<21>(std::move(def));
tag_ = Tag::AS_OPTIONAL_TENSORS;
}
const CustomObjArgument& get_as_custom_obj() const {
return std::get<22>(variant_);
}
void set_as_custom_obj(CustomObjArgument def) {
variant_.emplace<22>(std::move(def));
tag_ = Tag::AS_CUSTOM_OBJ;
}
const std::string& get_as_operator() const {
return std::get<23>(variant_);
}
void set_as_operator(std::string def) {
variant_.emplace<23>(std::move(def));
tag_ = Tag::AS_OPERATOR;
}
const SymFloatArgument& get_as_sym_float() const {
return std::get<24>(variant_);
}
void set_as_sym_float(SymFloatArgument def) {
variant_.emplace<24>(std::move(def));
tag_ = Tag::AS_SYM_FLOAT;
}
const std::vector<SymFloatArgument>& get_as_sym_floats() const {
return std::get<25>(variant_);
}
void set_as_sym_floats(std::vector<SymFloatArgument> def) {
variant_.emplace<25>(std::move(def));
tag_ = Tag::AS_SYM_FLOATS;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const Argument& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_NONE) {
nlohmann_json_j["as_none"] = nlohmann_json_t.get_as_none();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_TENSOR) {
nlohmann_json_j["as_tensor"] = nlohmann_json_t.get_as_tensor();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_TENSORS) {
nlohmann_json_j["as_tensors"] = nlohmann_json_t.get_as_tensors();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_INT) {
nlohmann_json_j["as_int"] = nlohmann_json_t.get_as_int();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_INTS) {
nlohmann_json_j["as_ints"] = nlohmann_json_t.get_as_ints();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_FLOAT) {
nlohmann_json_j["as_float"] = nlohmann_json_t.get_as_float();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_FLOATS) {
nlohmann_json_j["as_floats"] = nlohmann_json_t.get_as_floats();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_STRING) {
nlohmann_json_j["as_string"] = nlohmann_json_t.get_as_string();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_STRINGS) {
nlohmann_json_j["as_strings"] = nlohmann_json_t.get_as_strings();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_SYM_INT) {
nlohmann_json_j["as_sym_int"] = nlohmann_json_t.get_as_sym_int();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_SYM_INTS) {
nlohmann_json_j["as_sym_ints"] = nlohmann_json_t.get_as_sym_ints();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_SCALAR_TYPE) {
nlohmann_json_j["as_scalar_type"] = nlohmann_json_t.get_as_scalar_type();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_MEMORY_FORMAT) {
nlohmann_json_j["as_memory_format"] = nlohmann_json_t.get_as_memory_format();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_LAYOUT) {
nlohmann_json_j["as_layout"] = nlohmann_json_t.get_as_layout();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_DEVICE) {
nlohmann_json_j["as_device"] = nlohmann_json_t.get_as_device();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_BOOL) {
nlohmann_json_j["as_bool"] = nlohmann_json_t.get_as_bool();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_BOOLS) {
nlohmann_json_j["as_bools"] = nlohmann_json_t.get_as_bools();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_SYM_BOOL) {
nlohmann_json_j["as_sym_bool"] = nlohmann_json_t.get_as_sym_bool();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_SYM_BOOLS) {
nlohmann_json_j["as_sym_bools"] = nlohmann_json_t.get_as_sym_bools();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_GRAPH) {
nlohmann_json_j["as_graph"] = nlohmann_json_t.get_as_graph();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_OPTIONAL_TENSORS) {
nlohmann_json_j["as_optional_tensors"] = nlohmann_json_t.get_as_optional_tensors();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_CUSTOM_OBJ) {
nlohmann_json_j["as_custom_obj"] = nlohmann_json_t.get_as_custom_obj();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_OPERATOR) {
nlohmann_json_j["as_operator"] = nlohmann_json_t.get_as_operator();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_SYM_FLOAT) {
nlohmann_json_j["as_sym_float"] = nlohmann_json_t.get_as_sym_float();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_SYM_FLOATS) {
nlohmann_json_j["as_sym_floats"] = nlohmann_json_t.get_as_sym_floats();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, Argument& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_none")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_none").template get<bool>());
nlohmann_json_t.tag_ = Tag::AS_NONE;
return;
}
if (nlohmann_json_j.contains("as_tensor")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_tensor").template get<TensorArgument>());
nlohmann_json_t.tag_ = Tag::AS_TENSOR;
return;
}
if (nlohmann_json_j.contains("as_tensors")) {
nlohmann_json_t.variant_.emplace<3>(nlohmann_json_j.at("as_tensors").template get<std::vector<TensorArgument>>());
nlohmann_json_t.tag_ = Tag::AS_TENSORS;
return;
}
if (nlohmann_json_j.contains("as_int")) {
nlohmann_json_t.variant_.emplace<4>(nlohmann_json_j.at("as_int").template get<int64_t>());
nlohmann_json_t.tag_ = Tag::AS_INT;
return;
}
if (nlohmann_json_j.contains("as_ints")) {
nlohmann_json_t.variant_.emplace<5>(nlohmann_json_j.at("as_ints").template get<std::vector<int64_t>>());
nlohmann_json_t.tag_ = Tag::AS_INTS;
return;
}
if (nlohmann_json_j.contains("as_float")) {
nlohmann_json_t.variant_.emplace<6>(nlohmann_json_j.at("as_float").template get<F64>());
nlohmann_json_t.tag_ = Tag::AS_FLOAT;
return;
}
if (nlohmann_json_j.contains("as_floats")) {
nlohmann_json_t.variant_.emplace<7>(nlohmann_json_j.at("as_floats").template get<std::vector<F64>>());
nlohmann_json_t.tag_ = Tag::AS_FLOATS;
return;
}
if (nlohmann_json_j.contains("as_string")) {
nlohmann_json_t.variant_.emplace<8>(nlohmann_json_j.at("as_string").template get<std::string>());
nlohmann_json_t.tag_ = Tag::AS_STRING;
return;
}
if (nlohmann_json_j.contains("as_strings")) {
nlohmann_json_t.variant_.emplace<9>(nlohmann_json_j.at("as_strings").template get<std::vector<std::string>>());
nlohmann_json_t.tag_ = Tag::AS_STRINGS;
return;
}
if (nlohmann_json_j.contains("as_sym_int")) {
nlohmann_json_t.variant_.emplace<10>(nlohmann_json_j.at("as_sym_int").template get<SymIntArgument>());
nlohmann_json_t.tag_ = Tag::AS_SYM_INT;
return;
}
if (nlohmann_json_j.contains("as_sym_ints")) {
nlohmann_json_t.variant_.emplace<11>(nlohmann_json_j.at("as_sym_ints").template get<std::vector<SymIntArgument>>());
nlohmann_json_t.tag_ = Tag::AS_SYM_INTS;
return;
}
if (nlohmann_json_j.contains("as_scalar_type")) {
nlohmann_json_t.variant_.emplace<12>(nlohmann_json_j.at("as_scalar_type").template get<ScalarType>());
nlohmann_json_t.tag_ = Tag::AS_SCALAR_TYPE;
return;
}
if (nlohmann_json_j.contains("as_memory_format")) {
nlohmann_json_t.variant_.emplace<13>(nlohmann_json_j.at("as_memory_format").template get<MemoryFormat>());
nlohmann_json_t.tag_ = Tag::AS_MEMORY_FORMAT;
return;
}
if (nlohmann_json_j.contains("as_layout")) {
nlohmann_json_t.variant_.emplace<14>(nlohmann_json_j.at("as_layout").template get<Layout>());
nlohmann_json_t.tag_ = Tag::AS_LAYOUT;
return;
}
if (nlohmann_json_j.contains("as_device")) {
nlohmann_json_t.variant_.emplace<15>(nlohmann_json_j.at("as_device").template get<Device>());
nlohmann_json_t.tag_ = Tag::AS_DEVICE;
return;
}
if (nlohmann_json_j.contains("as_bool")) {
nlohmann_json_t.variant_.emplace<16>(nlohmann_json_j.at("as_bool").template get<bool>());
nlohmann_json_t.tag_ = Tag::AS_BOOL;
return;
}
if (nlohmann_json_j.contains("as_bools")) {
nlohmann_json_t.variant_.emplace<17>(nlohmann_json_j.at("as_bools").template get<std::vector<bool>>());
nlohmann_json_t.tag_ = Tag::AS_BOOLS;
return;
}
if (nlohmann_json_j.contains("as_sym_bool")) {
nlohmann_json_t.variant_.emplace<18>(nlohmann_json_j.at("as_sym_bool").template get<SymBoolArgument>());
nlohmann_json_t.tag_ = Tag::AS_SYM_BOOL;
return;
}
if (nlohmann_json_j.contains("as_sym_bools")) {
nlohmann_json_t.variant_.emplace<19>(nlohmann_json_j.at("as_sym_bools").template get<std::vector<SymBoolArgument>>());
nlohmann_json_t.tag_ = Tag::AS_SYM_BOOLS;
return;
}
if (nlohmann_json_j.contains("as_graph")) {
nlohmann_json_t.variant_.emplace<20>(nlohmann_json_j.at("as_graph").template get<GraphArgument>());
nlohmann_json_t.tag_ = Tag::AS_GRAPH;
return;
}
if (nlohmann_json_j.contains("as_optional_tensors")) {
nlohmann_json_t.variant_.emplace<21>(nlohmann_json_j.at("as_optional_tensors").template get<std::vector<OptionalTensorArgument>>());
nlohmann_json_t.tag_ = Tag::AS_OPTIONAL_TENSORS;
return;
}
if (nlohmann_json_j.contains("as_custom_obj")) {
nlohmann_json_t.variant_.emplace<22>(nlohmann_json_j.at("as_custom_obj").template get<CustomObjArgument>());
nlohmann_json_t.tag_ = Tag::AS_CUSTOM_OBJ;
return;
}
if (nlohmann_json_j.contains("as_operator")) {
nlohmann_json_t.variant_.emplace<23>(nlohmann_json_j.at("as_operator").template get<std::string>());
nlohmann_json_t.tag_ = Tag::AS_OPERATOR;
return;
}
if (nlohmann_json_j.contains("as_sym_float")) {
nlohmann_json_t.variant_.emplace<24>(nlohmann_json_j.at("as_sym_float").template get<SymFloatArgument>());
nlohmann_json_t.tag_ = Tag::AS_SYM_FLOAT;
return;
}
if (nlohmann_json_j.contains("as_sym_floats")) {
nlohmann_json_t.variant_.emplace<25>(nlohmann_json_j.at("as_sym_floats").template get<std::vector<SymFloatArgument>>());
nlohmann_json_t.tag_ = Tag::AS_SYM_FLOATS;
return;
}
}
};
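// Every tagged-union class in this schema (Argument here, and ConstantValue,
// InputSpec, OutputSpec below) follows the same pattern: a std::variant whose
// index 0 is an empty Void state, a Tag enum naming each non-Void alternative,
// and set_* members that emplace the payload and update tag_ together so the
// two cannot drift apart. Callers should branch on tag() before using the
// matching get_*; a mismatched getter throws std::bad_variant_access.
// to_json emits a single-key object whose key names the active alternative
// (e.g. "as_int"), and from_json probes those keys to restore payload and tag.
// printEnum/parseEnum below convert Tag values to and from their string names.
//
// Usage sketch (illustrative only):
//   Argument a;
//   a.set_as_int(42);
//   if (a.tag() == Argument::Tag::AS_INT) {
//     int64_t v = a.get_as_int();  // v == 42
//   }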
inline std::string_view printEnum(const Argument::Tag& e) {
switch (e) {
case Argument::Tag::AS_NONE: return "AS_NONE";
case Argument::Tag::AS_TENSOR: return "AS_TENSOR";
case Argument::Tag::AS_TENSORS: return "AS_TENSORS";
case Argument::Tag::AS_INT: return "AS_INT";
case Argument::Tag::AS_INTS: return "AS_INTS";
case Argument::Tag::AS_FLOAT: return "AS_FLOAT";
case Argument::Tag::AS_FLOATS: return "AS_FLOATS";
case Argument::Tag::AS_STRING: return "AS_STRING";
case Argument::Tag::AS_STRINGS: return "AS_STRINGS";
case Argument::Tag::AS_SYM_INT: return "AS_SYM_INT";
case Argument::Tag::AS_SYM_INTS: return "AS_SYM_INTS";
case Argument::Tag::AS_SCALAR_TYPE: return "AS_SCALAR_TYPE";
case Argument::Tag::AS_MEMORY_FORMAT: return "AS_MEMORY_FORMAT";
case Argument::Tag::AS_LAYOUT: return "AS_LAYOUT";
case Argument::Tag::AS_DEVICE: return "AS_DEVICE";
case Argument::Tag::AS_BOOL: return "AS_BOOL";
case Argument::Tag::AS_BOOLS: return "AS_BOOLS";
case Argument::Tag::AS_SYM_BOOL: return "AS_SYM_BOOL";
case Argument::Tag::AS_SYM_BOOLS: return "AS_SYM_BOOLS";
case Argument::Tag::AS_GRAPH: return "AS_GRAPH";
case Argument::Tag::AS_OPTIONAL_TENSORS: return "AS_OPTIONAL_TENSORS";
case Argument::Tag::AS_CUSTOM_OBJ: return "AS_CUSTOM_OBJ";
case Argument::Tag::AS_OPERATOR: return "AS_OPERATOR";
case Argument::Tag::AS_SYM_FLOAT: return "AS_SYM_FLOAT";
case Argument::Tag::AS_SYM_FLOATS: return "AS_SYM_FLOATS";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, Argument::Tag& t) {
if (s == "AS_NONE") { t = Argument::Tag::AS_NONE; return; }
if (s == "AS_TENSOR") { t = Argument::Tag::AS_TENSOR; return; }
if (s == "AS_TENSORS") { t = Argument::Tag::AS_TENSORS; return; }
if (s == "AS_INT") { t = Argument::Tag::AS_INT; return; }
if (s == "AS_INTS") { t = Argument::Tag::AS_INTS; return; }
if (s == "AS_FLOAT") { t = Argument::Tag::AS_FLOAT; return; }
if (s == "AS_FLOATS") { t = Argument::Tag::AS_FLOATS; return; }
if (s == "AS_STRING") { t = Argument::Tag::AS_STRING; return; }
if (s == "AS_STRINGS") { t = Argument::Tag::AS_STRINGS; return; }
if (s == "AS_SYM_INT") { t = Argument::Tag::AS_SYM_INT; return; }
if (s == "AS_SYM_INTS") { t = Argument::Tag::AS_SYM_INTS; return; }
if (s == "AS_SCALAR_TYPE") { t = Argument::Tag::AS_SCALAR_TYPE; return; }
if (s == "AS_MEMORY_FORMAT") { t = Argument::Tag::AS_MEMORY_FORMAT; return; }
if (s == "AS_LAYOUT") { t = Argument::Tag::AS_LAYOUT; return; }
if (s == "AS_DEVICE") { t = Argument::Tag::AS_DEVICE; return; }
if (s == "AS_BOOL") { t = Argument::Tag::AS_BOOL; return; }
if (s == "AS_BOOLS") { t = Argument::Tag::AS_BOOLS; return; }
if (s == "AS_SYM_BOOL") { t = Argument::Tag::AS_SYM_BOOL; return; }
if (s == "AS_SYM_BOOLS") { t = Argument::Tag::AS_SYM_BOOLS; return; }
if (s == "AS_GRAPH") { t = Argument::Tag::AS_GRAPH; return; }
if (s == "AS_OPTIONAL_TENSORS") { t = Argument::Tag::AS_OPTIONAL_TENSORS; return; }
if (s == "AS_CUSTOM_OBJ") { t = Argument::Tag::AS_CUSTOM_OBJ; return; }
if (s == "AS_OPERATOR") { t = Argument::Tag::AS_OPERATOR; return; }
if (s == "AS_SYM_FLOAT") { t = Argument::Tag::AS_SYM_FLOAT; return; }
if (s == "AS_SYM_FLOATS") { t = Argument::Tag::AS_SYM_FLOATS; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class NamedArgument {
private:
std::string name;
Argument arg;
std::optional<int64_t> kind = std::nullopt;
public:
const std::string& get_name() const {
return name;
}
void set_name(std::string def) {
name = std::move(def);
}
const Argument& get_arg() const {
return arg;
}
void set_arg(Argument def) {
arg = std::move(def);
}
const std::optional<int64_t>& get_kind() const {
return kind;
}
void set_kind(std::optional<int64_t> def) {
kind = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const NamedArgument& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, NamedArgument& nlohmann_json_t);
};
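// Node describes one serialized graph node: `target` names the callable being
// invoked, `inputs` pairs each argument name with an Argument payload,
// `outputs` lists the produced values, and `metadata` carries free-form
// string key/value annotations attached by the exporter.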
class Node {
private:
std::string target;
std::vector<NamedArgument> inputs;
std::vector<Argument> outputs;
std::unordered_map<std::string, std::string> metadata;
std::optional<bool> is_hop_single_tensor_return = std::nullopt;
public:
const std::string& get_target() const {
return target;
}
void set_target(std::string def) {
target = std::move(def);
}
const std::vector<NamedArgument>& get_inputs() const {
return inputs;
}
void set_inputs(std::vector<NamedArgument> def) {
inputs = std::move(def);
}
const std::vector<Argument>& get_outputs() const {
return outputs;
}
void set_outputs(std::vector<Argument> def) {
outputs = std::move(def);
}
const std::unordered_map<std::string, std::string>& get_metadata() const {
return metadata;
}
void set_metadata(std::unordered_map<std::string, std::string> def) {
metadata = std::move(def);
}
const std::optional<bool>& get_is_hop_single_tensor_return() const {
return is_hop_single_tensor_return;
}
void set_is_hop_single_tensor_return(std::optional<bool> def) {
is_hop_single_tensor_return = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const Node& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, Node& nlohmann_json_t);
};
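// Graph is the serialized graph body: ordered input/output Arguments, the node
// list, and name-keyed side tables giving per-value metadata (TensorMeta,
// SymInt, SymBool, SymFloat, CustomObjArgument).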
class Graph {
private:
std::vector<Argument> inputs;
std::vector<Argument> outputs;
std::vector<Node> nodes;
std::unordered_map<std::string, TensorMeta> tensor_values;
std::unordered_map<std::string, SymInt> sym_int_values;
std::unordered_map<std::string, SymBool> sym_bool_values;
bool is_single_tensor_return = false;
std::unordered_map<std::string, CustomObjArgument> custom_obj_values = {};
std::unordered_map<std::string, SymFloat> sym_float_values = {};
public:
const std::vector<Argument>& get_inputs() const {
return inputs;
}
void set_inputs(std::vector<Argument> def) {
inputs = std::move(def);
}
const std::vector<Argument>& get_outputs() const {
return outputs;
}
void set_outputs(std::vector<Argument> def) {
outputs = std::move(def);
}
const std::vector<Node>& get_nodes() const {
return nodes;
}
void set_nodes(std::vector<Node> def) {
nodes = std::move(def);
}
const std::unordered_map<std::string, TensorMeta>& get_tensor_values() const {
return tensor_values;
}
void set_tensor_values(std::unordered_map<std::string, TensorMeta> def) {
tensor_values = std::move(def);
}
const std::unordered_map<std::string, SymInt>& get_sym_int_values() const {
return sym_int_values;
}
void set_sym_int_values(std::unordered_map<std::string, SymInt> def) {
sym_int_values = std::move(def);
}
const std::unordered_map<std::string, SymBool>& get_sym_bool_values() const {
return sym_bool_values;
}
void set_sym_bool_values(std::unordered_map<std::string, SymBool> def) {
sym_bool_values = std::move(def);
}
const bool& get_is_single_tensor_return() const {
return is_single_tensor_return;
}
void set_is_single_tensor_return(bool def) {
is_single_tensor_return = std::move(def);
}
const std::unordered_map<std::string, CustomObjArgument>& get_custom_obj_values() const {
return custom_obj_values;
}
void set_custom_obj_values(std::unordered_map<std::string, CustomObjArgument> def) {
custom_obj_values = std::move(def);
}
const std::unordered_map<std::string, SymFloat>& get_sym_float_values() const {
return sym_float_values;
}
void set_sym_float_values(std::unordered_map<std::string, SymFloat> def) {
sym_float_values = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const Graph& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, Graph& nlohmann_json_t);
};
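// The *Spec classes that follow are the per-alternative payloads of the
// InputSpec tagged union defined further down; each pairs an argument
// reference with the information needed to classify a graph input (user
// input, parameter, buffer, tensor constant, custom object, token, or
// constant input).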
class UserInputSpec {
private:
Argument arg;
public:
const Argument& get_arg() const {
return arg;
}
void set_arg(Argument def) {
arg = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const UserInputSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, UserInputSpec& nlohmann_json_t);
};
class ConstantValue {
struct Void {};
public:
enum class Tag {
AS_NONE, AS_INT, AS_FLOAT, AS_STRING, AS_BOOL
};
private:
std::variant<Void, bool, int64_t, F64, std::string, bool> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const bool& get_as_none() const {
return std::get<1>(variant_);
}
void set_as_none(bool def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::AS_NONE;
}
const int64_t& get_as_int() const {
return std::get<2>(variant_);
}
void set_as_int(int64_t def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::AS_INT;
}
const F64& get_as_float() const {
return std::get<3>(variant_);
}
void set_as_float(F64 def) {
variant_.emplace<3>(std::move(def));
tag_ = Tag::AS_FLOAT;
}
const std::string& get_as_string() const {
return std::get<4>(variant_);
}
void set_as_string(std::string def) {
variant_.emplace<4>(std::move(def));
tag_ = Tag::AS_STRING;
}
const bool& get_as_bool() const {
return std::get<5>(variant_);
}
void set_as_bool(bool def) {
variant_.emplace<5>(std::move(def));
tag_ = Tag::AS_BOOL;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const ConstantValue& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::AS_NONE) {
nlohmann_json_j["as_none"] = nlohmann_json_t.get_as_none();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_INT) {
nlohmann_json_j["as_int"] = nlohmann_json_t.get_as_int();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_FLOAT) {
nlohmann_json_j["as_float"] = nlohmann_json_t.get_as_float();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_STRING) {
nlohmann_json_j["as_string"] = nlohmann_json_t.get_as_string();
return;
}
if (nlohmann_json_t.tag_ == Tag::AS_BOOL) {
nlohmann_json_j["as_bool"] = nlohmann_json_t.get_as_bool();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, ConstantValue& nlohmann_json_t) {
if (nlohmann_json_j.contains("as_none")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("as_none").template get<bool>());
nlohmann_json_t.tag_ = Tag::AS_NONE;
return;
}
if (nlohmann_json_j.contains("as_int")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("as_int").template get<int64_t>());
nlohmann_json_t.tag_ = Tag::AS_INT;
return;
}
if (nlohmann_json_j.contains("as_float")) {
nlohmann_json_t.variant_.emplace<3>(nlohmann_json_j.at("as_float").template get<F64>());
nlohmann_json_t.tag_ = Tag::AS_FLOAT;
return;
}
if (nlohmann_json_j.contains("as_string")) {
nlohmann_json_t.variant_.emplace<4>(nlohmann_json_j.at("as_string").template get<std::string>());
nlohmann_json_t.tag_ = Tag::AS_STRING;
return;
}
if (nlohmann_json_j.contains("as_bool")) {
nlohmann_json_t.variant_.emplace<5>(nlohmann_json_j.at("as_bool").template get<bool>());
nlohmann_json_t.tag_ = Tag::AS_BOOL;
return;
}
}
};
inline std::string_view printEnum(const ConstantValue::Tag& e) {
switch (e) {
case ConstantValue::Tag::AS_NONE: return "AS_NONE";
case ConstantValue::Tag::AS_INT: return "AS_INT";
case ConstantValue::Tag::AS_FLOAT: return "AS_FLOAT";
case ConstantValue::Tag::AS_STRING: return "AS_STRING";
case ConstantValue::Tag::AS_BOOL: return "AS_BOOL";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, ConstantValue::Tag& t) {
if (s == "AS_NONE") { t = ConstantValue::Tag::AS_NONE; return; }
if (s == "AS_INT") { t = ConstantValue::Tag::AS_INT; return; }
if (s == "AS_FLOAT") { t = ConstantValue::Tag::AS_FLOAT; return; }
if (s == "AS_STRING") { t = ConstantValue::Tag::AS_STRING; return; }
if (s == "AS_BOOL") { t = ConstantValue::Tag::AS_BOOL; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
class InputToConstantInputSpec {
private:
std::string name;
ConstantValue value;
public:
const std::string& get_name() const {
return name;
}
void set_name(std::string def) {
name = std::move(def);
}
const ConstantValue& get_value() const {
return value;
}
void set_value(ConstantValue def) {
value = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const InputToConstantInputSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, InputToConstantInputSpec& nlohmann_json_t);
};
class InputToParameterSpec {
private:
TensorArgument arg;
std::string parameter_name;
public:
const TensorArgument& get_arg() const {
return arg;
}
void set_arg(TensorArgument def) {
arg = std::move(def);
}
const std::string& get_parameter_name() const {
return parameter_name;
}
void set_parameter_name(std::string def) {
parameter_name = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const InputToParameterSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, InputToParameterSpec& nlohmann_json_t);
};
class InputToBufferSpec {
private:
TensorArgument arg;
std::string buffer_name;
bool persistent;
public:
const TensorArgument& get_arg() const {
return arg;
}
void set_arg(TensorArgument def) {
arg = std::move(def);
}
const std::string& get_buffer_name() const {
return buffer_name;
}
void set_buffer_name(std::string def) {
buffer_name = std::move(def);
}
const bool& get_persistent() const {
return persistent;
}
void set_persistent(bool def) {
persistent = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const InputToBufferSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, InputToBufferSpec& nlohmann_json_t);
};
class InputToTensorConstantSpec {
private:
TensorArgument arg;
std::string tensor_constant_name;
public:
const TensorArgument& get_arg() const {
return arg;
}
void set_arg(TensorArgument def) {
arg = std::move(def);
}
const std::string& get_tensor_constant_name() const {
return tensor_constant_name;
}
void set_tensor_constant_name(std::string def) {
tensor_constant_name = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const InputToTensorConstantSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, InputToTensorConstantSpec& nlohmann_json_t);
};
class InputToCustomObjSpec {
private:
CustomObjArgument arg;
std::string custom_obj_name;
public:
const CustomObjArgument& get_arg() const {
return arg;
}
void set_arg(CustomObjArgument def) {
arg = std::move(def);
}
const std::string& get_custom_obj_name() const {
return custom_obj_name;
}
void set_custom_obj_name(std::string def) {
custom_obj_name = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const InputToCustomObjSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, InputToCustomObjSpec& nlohmann_json_t);
};
class InputTokenSpec {
private:
TokenArgument arg;
public:
const TokenArgument& get_arg() const {
return arg;
}
void set_arg(TokenArgument def) {
arg = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const InputTokenSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, InputTokenSpec& nlohmann_json_t);
};
class InputSpec {
struct Void {};
public:
enum class Tag {
USER_INPUT, PARAMETER, BUFFER, TENSOR_CONSTANT, CUSTOM_OBJ, TOKEN, CONSTANT_INPUT
};
private:
std::variant<Void, UserInputSpec, InputToParameterSpec, InputToBufferSpec, InputToTensorConstantSpec, InputToCustomObjSpec, InputTokenSpec, InputToConstantInputSpec> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const UserInputSpec& get_user_input() const {
return std::get<1>(variant_);
}
void set_user_input(UserInputSpec def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::USER_INPUT;
}
const InputToParameterSpec& get_parameter() const {
return std::get<2>(variant_);
}
void set_parameter(InputToParameterSpec def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::PARAMETER;
}
const InputToBufferSpec& get_buffer() const {
return std::get<3>(variant_);
}
void set_buffer(InputToBufferSpec def) {
variant_.emplace<3>(std::move(def));
tag_ = Tag::BUFFER;
}
const InputToTensorConstantSpec& get_tensor_constant() const {
return std::get<4>(variant_);
}
void set_tensor_constant(InputToTensorConstantSpec def) {
variant_.emplace<4>(std::move(def));
tag_ = Tag::TENSOR_CONSTANT;
}
const InputToCustomObjSpec& get_custom_obj() const {
return std::get<5>(variant_);
}
void set_custom_obj(InputToCustomObjSpec def) {
variant_.emplace<5>(std::move(def));
tag_ = Tag::CUSTOM_OBJ;
}
const InputTokenSpec& get_token() const {
return std::get<6>(variant_);
}
void set_token(InputTokenSpec def) {
variant_.emplace<6>(std::move(def));
tag_ = Tag::TOKEN;
}
const InputToConstantInputSpec& get_constant_input() const {
return std::get<7>(variant_);
}
void set_constant_input(InputToConstantInputSpec def) {
variant_.emplace<7>(std::move(def));
tag_ = Tag::CONSTANT_INPUT;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const InputSpec& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::USER_INPUT) {
nlohmann_json_j["user_input"] = nlohmann_json_t.get_user_input();
return;
}
if (nlohmann_json_t.tag_ == Tag::PARAMETER) {
nlohmann_json_j["parameter"] = nlohmann_json_t.get_parameter();
return;
}
if (nlohmann_json_t.tag_ == Tag::BUFFER) {
nlohmann_json_j["buffer"] = nlohmann_json_t.get_buffer();
return;
}
if (nlohmann_json_t.tag_ == Tag::TENSOR_CONSTANT) {
nlohmann_json_j["tensor_constant"] = nlohmann_json_t.get_tensor_constant();
return;
}
if (nlohmann_json_t.tag_ == Tag::CUSTOM_OBJ) {
nlohmann_json_j["custom_obj"] = nlohmann_json_t.get_custom_obj();
return;
}
if (nlohmann_json_t.tag_ == Tag::TOKEN) {
nlohmann_json_j["token"] = nlohmann_json_t.get_token();
return;
}
if (nlohmann_json_t.tag_ == Tag::CONSTANT_INPUT) {
nlohmann_json_j["constant_input"] = nlohmann_json_t.get_constant_input();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, InputSpec& nlohmann_json_t) {
if (nlohmann_json_j.contains("user_input")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("user_input").template get<UserInputSpec>());
nlohmann_json_t.tag_ = Tag::USER_INPUT;
return;
}
if (nlohmann_json_j.contains("parameter")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("parameter").template get<InputToParameterSpec>());
nlohmann_json_t.tag_ = Tag::PARAMETER;
return;
}
if (nlohmann_json_j.contains("buffer")) {
nlohmann_json_t.variant_.emplace<3>(nlohmann_json_j.at("buffer").template get<InputToBufferSpec>());
nlohmann_json_t.tag_ = Tag::BUFFER;
return;
}
if (nlohmann_json_j.contains("tensor_constant")) {
nlohmann_json_t.variant_.emplace<4>(nlohmann_json_j.at("tensor_constant").template get<InputToTensorConstantSpec>());
nlohmann_json_t.tag_ = Tag::TENSOR_CONSTANT;
return;
}
if (nlohmann_json_j.contains("custom_obj")) {
nlohmann_json_t.variant_.emplace<5>(nlohmann_json_j.at("custom_obj").template get<InputToCustomObjSpec>());
nlohmann_json_t.tag_ = Tag::CUSTOM_OBJ;
return;
}
if (nlohmann_json_j.contains("token")) {
nlohmann_json_t.variant_.emplace<6>(nlohmann_json_j.at("token").template get<InputTokenSpec>());
nlohmann_json_t.tag_ = Tag::TOKEN;
return;
}
if (nlohmann_json_j.contains("constant_input")) {
nlohmann_json_t.variant_.emplace<7>(nlohmann_json_j.at("constant_input").template get<InputToConstantInputSpec>());
nlohmann_json_t.tag_ = Tag::CONSTANT_INPUT;
return;
}
}
};
inline std::string_view printEnum(const InputSpec::Tag& e) {
switch (e) {
case InputSpec::Tag::USER_INPUT: return "USER_INPUT";
case InputSpec::Tag::PARAMETER: return "PARAMETER";
case InputSpec::Tag::BUFFER: return "BUFFER";
case InputSpec::Tag::TENSOR_CONSTANT: return "TENSOR_CONSTANT";
case InputSpec::Tag::CUSTOM_OBJ: return "CUSTOM_OBJ";
case InputSpec::Tag::TOKEN: return "TOKEN";
case InputSpec::Tag::CONSTANT_INPUT: return "CONSTANT_INPUT";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, InputSpec::Tag& t) {
if (s == "USER_INPUT") { t = InputSpec::Tag::USER_INPUT; return; }
if (s == "PARAMETER") { t = InputSpec::Tag::PARAMETER; return; }
if (s == "BUFFER") { t = InputSpec::Tag::BUFFER; return; }
if (s == "TENSOR_CONSTANT") { t = InputSpec::Tag::TENSOR_CONSTANT; return; }
if (s == "CUSTOM_OBJ") { t = InputSpec::Tag::CUSTOM_OBJ; return; }
if (s == "TOKEN") { t = InputSpec::Tag::TOKEN; return; }
if (s == "CONSTANT_INPUT") { t = InputSpec::Tag::CONSTANT_INPUT; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
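// The output-side payloads below mirror the input specs: they classify each
// graph output as a user-visible output, loss, buffer or user-input mutation,
// gradient, or token for the OutputSpec tagged union.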
class UserOutputSpec {
private:
Argument arg;
public:
const Argument& get_arg() const {
return arg;
}
void set_arg(Argument def) {
arg = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const UserOutputSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, UserOutputSpec& nlohmann_json_t);
};
class LossOutputSpec {
private:
TensorArgument arg;
public:
const TensorArgument& get_arg() const {
return arg;
}
void set_arg(TensorArgument def) {
arg = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const LossOutputSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, LossOutputSpec& nlohmann_json_t);
};
class BufferMutationSpec {
private:
TensorArgument arg;
std::string buffer_name;
public:
const TensorArgument& get_arg() const {
return arg;
}
void set_arg(TensorArgument def) {
arg = std::move(def);
}
const std::string& get_buffer_name() const {
return buffer_name;
}
void set_buffer_name(std::string def) {
buffer_name = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const BufferMutationSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, BufferMutationSpec& nlohmann_json_t);
};
class GradientToParameterSpec {
private:
TensorArgument arg;
std::string parameter_name;
public:
const TensorArgument& get_arg() const {
return arg;
}
void set_arg(TensorArgument def) {
arg = std::move(def);
}
const std::string& get_parameter_name() const {
return parameter_name;
}
void set_parameter_name(std::string def) {
parameter_name = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const GradientToParameterSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, GradientToParameterSpec& nlohmann_json_t);
};
class GradientToUserInputSpec {
private:
TensorArgument arg;
std::string user_input_name;
public:
const TensorArgument& get_arg() const {
return arg;
}
void set_arg(TensorArgument def) {
arg = std::move(def);
}
const std::string& get_user_input_name() const {
return user_input_name;
}
void set_user_input_name(std::string def) {
user_input_name = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const GradientToUserInputSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, GradientToUserInputSpec& nlohmann_json_t);
};
class UserInputMutationSpec {
private:
TensorArgument arg;
std::string user_input_name;
public:
const TensorArgument& get_arg() const {
return arg;
}
void set_arg(TensorArgument def) {
arg = std::move(def);
}
const std::string& get_user_input_name() const {
return user_input_name;
}
void set_user_input_name(std::string def) {
user_input_name = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const UserInputMutationSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, UserInputMutationSpec& nlohmann_json_t);
};
class OutputTokenSpec {
private:
TokenArgument arg;
public:
const TokenArgument& get_arg() const {
return arg;
}
void set_arg(TokenArgument def) {
arg = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const OutputTokenSpec& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, OutputTokenSpec& nlohmann_json_t);
};
class OutputSpec {
struct Void {};
public:
enum class Tag {
USER_OUTPUT, LOSS_OUTPUT, BUFFER_MUTATION, GRADIENT_TO_PARAMETER, GRADIENT_TO_USER_INPUT, USER_INPUT_MUTATION, TOKEN
};
private:
std::variant<Void, UserOutputSpec, LossOutputSpec, BufferMutationSpec, GradientToParameterSpec, GradientToUserInputSpec, UserInputMutationSpec, OutputTokenSpec> variant_;
Tag tag_;
public:
Tag tag() const {
return tag_;
}
const UserOutputSpec& get_user_output() const {
return std::get<1>(variant_);
}
void set_user_output(UserOutputSpec def) {
variant_.emplace<1>(std::move(def));
tag_ = Tag::USER_OUTPUT;
}
const LossOutputSpec& get_loss_output() const {
return std::get<2>(variant_);
}
void set_loss_output(LossOutputSpec def) {
variant_.emplace<2>(std::move(def));
tag_ = Tag::LOSS_OUTPUT;
}
const BufferMutationSpec& get_buffer_mutation() const {
return std::get<3>(variant_);
}
void set_buffer_mutation(BufferMutationSpec def) {
variant_.emplace<3>(std::move(def));
tag_ = Tag::BUFFER_MUTATION;
}
const GradientToParameterSpec& get_gradient_to_parameter() const {
return std::get<4>(variant_);
}
void set_gradient_to_parameter(GradientToParameterSpec def) {
variant_.emplace<4>(std::move(def));
tag_ = Tag::GRADIENT_TO_PARAMETER;
}
const GradientToUserInputSpec& get_gradient_to_user_input() const {
return std::get<5>(variant_);
}
void set_gradient_to_user_input(GradientToUserInputSpec def) {
variant_.emplace<5>(std::move(def));
tag_ = Tag::GRADIENT_TO_USER_INPUT;
}
const UserInputMutationSpec& get_user_input_mutation() const {
return std::get<6>(variant_);
}
void set_user_input_mutation(UserInputMutationSpec def) {
variant_.emplace<6>(std::move(def));
tag_ = Tag::USER_INPUT_MUTATION;
}
const OutputTokenSpec& get_token() const {
return std::get<7>(variant_);
}
void set_token(OutputTokenSpec def) {
variant_.emplace<7>(std::move(def));
tag_ = Tag::TOKEN;
}
friend void to_json(nlohmann::json& nlohmann_json_j, const OutputSpec& nlohmann_json_t) {
if (nlohmann_json_t.tag_ == Tag::USER_OUTPUT) {
nlohmann_json_j["user_output"] = nlohmann_json_t.get_user_output();
return;
}
if (nlohmann_json_t.tag_ == Tag::LOSS_OUTPUT) {
nlohmann_json_j["loss_output"] = nlohmann_json_t.get_loss_output();
return;
}
if (nlohmann_json_t.tag_ == Tag::BUFFER_MUTATION) {
nlohmann_json_j["buffer_mutation"] = nlohmann_json_t.get_buffer_mutation();
return;
}
if (nlohmann_json_t.tag_ == Tag::GRADIENT_TO_PARAMETER) {
nlohmann_json_j["gradient_to_parameter"] = nlohmann_json_t.get_gradient_to_parameter();
return;
}
if (nlohmann_json_t.tag_ == Tag::GRADIENT_TO_USER_INPUT) {
nlohmann_json_j["gradient_to_user_input"] = nlohmann_json_t.get_gradient_to_user_input();
return;
}
if (nlohmann_json_t.tag_ == Tag::USER_INPUT_MUTATION) {
nlohmann_json_j["user_input_mutation"] = nlohmann_json_t.get_user_input_mutation();
return;
}
if (nlohmann_json_t.tag_ == Tag::TOKEN) {
nlohmann_json_j["token"] = nlohmann_json_t.get_token();
return;
}
}
friend void from_json(const nlohmann::json& nlohmann_json_j, OutputSpec& nlohmann_json_t) {
if (nlohmann_json_j.contains("user_output")) {
nlohmann_json_t.variant_.emplace<1>(nlohmann_json_j.at("user_output").template get<UserOutputSpec>());
nlohmann_json_t.tag_ = Tag::USER_OUTPUT;
return;
}
if (nlohmann_json_j.contains("loss_output")) {
nlohmann_json_t.variant_.emplace<2>(nlohmann_json_j.at("loss_output").template get<LossOutputSpec>());
nlohmann_json_t.tag_ = Tag::LOSS_OUTPUT;
return;
}
if (nlohmann_json_j.contains("buffer_mutation")) {
nlohmann_json_t.variant_.emplace<3>(nlohmann_json_j.at("buffer_mutation").template get<BufferMutationSpec>());
nlohmann_json_t.tag_ = Tag::BUFFER_MUTATION;
return;
}
if (nlohmann_json_j.contains("gradient_to_parameter")) {
nlohmann_json_t.variant_.emplace<4>(nlohmann_json_j.at("gradient_to_parameter").template get<GradientToParameterSpec>());
nlohmann_json_t.tag_ = Tag::GRADIENT_TO_PARAMETER;
return;
}
if (nlohmann_json_j.contains("gradient_to_user_input")) {
nlohmann_json_t.variant_.emplace<5>(nlohmann_json_j.at("gradient_to_user_input").template get<GradientToUserInputSpec>());
nlohmann_json_t.tag_ = Tag::GRADIENT_TO_USER_INPUT;
return;
}
if (nlohmann_json_j.contains("user_input_mutation")) {
nlohmann_json_t.variant_.emplace<6>(nlohmann_json_j.at("user_input_mutation").template get<UserInputMutationSpec>());
nlohmann_json_t.tag_ = Tag::USER_INPUT_MUTATION;
return;
}
if (nlohmann_json_j.contains("token")) {
nlohmann_json_t.variant_.emplace<7>(nlohmann_json_j.at("token").template get<OutputTokenSpec>());
nlohmann_json_t.tag_ = Tag::TOKEN;
return;
}
}
};
inline std::string_view printEnum(const OutputSpec::Tag& e) {
switch (e) {
case OutputSpec::Tag::USER_OUTPUT: return "USER_OUTPUT";
case OutputSpec::Tag::LOSS_OUTPUT: return "LOSS_OUTPUT";
case OutputSpec::Tag::BUFFER_MUTATION: return "BUFFER_MUTATION";
case OutputSpec::Tag::GRADIENT_TO_PARAMETER: return "GRADIENT_TO_PARAMETER";
case OutputSpec::Tag::GRADIENT_TO_USER_INPUT: return "GRADIENT_TO_USER_INPUT";
case OutputSpec::Tag::USER_INPUT_MUTATION: return "USER_INPUT_MUTATION";
case OutputSpec::Tag::TOKEN: return "TOKEN";
default:
throw std::runtime_error("Unknown enum value");
}
}
inline void parseEnum(std::string_view s, OutputSpec::Tag& t) {
if (s == "USER_OUTPUT") { t = OutputSpec::Tag::USER_OUTPUT; return; }
if (s == "LOSS_OUTPUT") { t = OutputSpec::Tag::LOSS_OUTPUT; return; }
if (s == "BUFFER_MUTATION") { t = OutputSpec::Tag::BUFFER_MUTATION; return; }
if (s == "GRADIENT_TO_PARAMETER") { t = OutputSpec::Tag::GRADIENT_TO_PARAMETER; return; }
if (s == "GRADIENT_TO_USER_INPUT") { t = OutputSpec::Tag::GRADIENT_TO_USER_INPUT; return; }
if (s == "USER_INPUT_MUTATION") { t = OutputSpec::Tag::USER_INPUT_MUTATION; return; }
if (s == "TOKEN") { t = OutputSpec::Tag::TOKEN; return; }
throw std::runtime_error("Unknown enum value: " + std::string{s});
}
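// GraphSignature pairs the two spec lists: input_specs and output_specs are
// expected to line up positionally with Graph::inputs and Graph::outputs, so
// each graph value can be routed to its role (user data, parameter, buffer,
// mutation, token).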
class GraphSignature {
private:
std::vector<InputSpec> input_specs;
std::vector<OutputSpec> output_specs;
public:
const std::vector<InputSpec>& get_input_specs() const {
return input_specs;
}
void set_input_specs(std::vector<InputSpec> def) {
input_specs = std::move(def);
}
const std::vector<OutputSpec>& get_output_specs() const {
return output_specs;
}
void set_output_specs(std::vector<OutputSpec> def) {
output_specs = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const GraphSignature& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, GraphSignature& nlohmann_json_t);
};
class RangeConstraint {
private:
std::optional<int64_t> min_val;
std::optional<int64_t> max_val;
public:
const std::optional<int64_t>& get_min_val() const {
return min_val;
}
void set_min_val(std::optional<int64_t> def) {
min_val = std::move(def);
}
const std::optional<int64_t>& get_max_val() const {
return max_val;
}
void set_max_val(std::optional<int64_t> def) {
max_val = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const RangeConstraint& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, RangeConstraint& nlohmann_json_t);
};
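// ModuleCallSignature and ModuleCallEntry preserve per-submodule calling
// conventions: in_spec/out_spec hold serialized pytree treespec strings that
// map the flat Argument lists back to the original (args, kwargs) structure,
// and forward_arg_names optionally records the original parameter names.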
class ModuleCallSignature {
private:
std::vector<Argument> inputs;
std::vector<Argument> outputs;
std::string in_spec;
std::string out_spec;
std::optional<std::vector<std::string>> forward_arg_names = std::nullopt;
public:
const std::vector<Argument>& get_inputs() const {
return inputs;
}
void set_inputs(std::vector<Argument> def) {
inputs = std::move(def);
}
const std::vector<Argument>& get_outputs() const {
return outputs;
}
void set_outputs(std::vector<Argument> def) {
outputs = std::move(def);
}
const std::string& get_in_spec() const {
return in_spec;
}
void set_in_spec(std::string def) {
in_spec = std::move(def);
}
const std::string& get_out_spec() const {
return out_spec;
}
void set_out_spec(std::string def) {
out_spec = std::move(def);
}
const std::optional<std::vector<std::string>>& get_forward_arg_names() const {
return forward_arg_names;
}
void set_forward_arg_names(std::optional<std::vector<std::string>> def) {
forward_arg_names = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const ModuleCallSignature& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, ModuleCallSignature& nlohmann_json_t);
};
class ModuleCallEntry {
private:
std::string fqn;
std::optional<ModuleCallSignature> signature = std::nullopt;
public:
const std::string& get_fqn() const {
return fqn;
}
void set_fqn(std::string def) {
fqn = std::move(def);
}
const std::optional<ModuleCallSignature>& get_signature() const {
return signature;
}
void set_signature(std::optional<ModuleCallSignature> def) {
signature = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const ModuleCallEntry& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, ModuleCallEntry& nlohmann_json_t);
};
class NamedTupleDef {
private:
std::vector<std::string> field_names;
public:
const std::vector<std::string>& get_field_names() const {
return field_names;
}
void set_field_names(std::vector<std::string> def) {
field_names = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const NamedTupleDef& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, NamedTupleDef& nlohmann_json_t);
};
class GraphModule {
private:
Graph graph;
GraphSignature signature;
std::vector<ModuleCallEntry> module_call_graph;
std::unordered_map<std::string, std::string> metadata = {};
std::unordered_map<std::string, NamedTupleDef> treespec_namedtuple_fields = {};
public:
const Graph& get_graph() const {
return graph;
}
void set_graph(Graph def) {
graph = std::move(def);
}
const GraphSignature& get_signature() const {
return signature;
}
void set_signature(GraphSignature def) {
signature = std::move(def);
}
const std::vector<ModuleCallEntry>& get_module_call_graph() const {
return module_call_graph;
}
void set_module_call_graph(std::vector<ModuleCallEntry> def) {
module_call_graph = std::move(def);
}
const std::unordered_map<std::string, std::string>& get_metadata() const {
return metadata;
}
void set_metadata(std::unordered_map<std::string, std::string> def) {
metadata = std::move(def);
}
const std::unordered_map<std::string, NamedTupleDef>& get_treespec_namedtuple_fields() const {
return treespec_namedtuple_fields;
}
void set_treespec_namedtuple_fields(std::unordered_map<std::string, NamedTupleDef> def) {
treespec_namedtuple_fields = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const GraphModule& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, GraphModule& nlohmann_json_t);
};
class SchemaVersion {
private:
int64_t major;
int64_t minor;
public:
const int64_t& get_major() const {
return major;
}
void set_major(int64_t def) {
major = std::move(def);
}
const int64_t& get_minor() const {
return minor;
}
void set_minor(int64_t def) {
minor = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const SchemaVersion& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, SchemaVersion& nlohmann_json_t);
};
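// ExportedProgram is the top-level serialized artifact: the GraphModule plus
// versioning (schema_version, torch_version, opset_version), symbolic-shape
// range constraints, and the verifiers the program was checked against.
// Program and Model below bundle one ExportedProgram per method together with
// paths to the associated tensor and constant data.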
class ExportedProgram {
private:
GraphModule graph_module;
std::unordered_map<std::string, int64_t> opset_version;
std::unordered_map<std::string, RangeConstraint> range_constraints;
SchemaVersion schema_version;
std::vector<std::string> verifiers = {};
std::string torch_version = "<=2.4";
public:
const GraphModule& get_graph_module() const {
return graph_module;
}
void set_graph_module(GraphModule def) {
graph_module = std::move(def);
}
const std::unordered_map<std::string, int64_t>& get_opset_version() const {
return opset_version;
}
void set_opset_version(std::unordered_map<std::string, int64_t> def) {
opset_version = std::move(def);
}
const std::unordered_map<std::string, RangeConstraint>& get_range_constraints() const {
return range_constraints;
}
void set_range_constraints(std::unordered_map<std::string, RangeConstraint> def) {
range_constraints = std::move(def);
}
const SchemaVersion& get_schema_version() const {
return schema_version;
}
void set_schema_version(SchemaVersion def) {
schema_version = std::move(def);
}
const std::vector<std::string>& get_verifiers() const {
return verifiers;
}
void set_verifiers(std::vector<std::string> def) {
verifiers = std::move(def);
}
const std::string& get_torch_version() const {
return torch_version;
}
void set_torch_version(std::string def) {
torch_version = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const ExportedProgram& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, ExportedProgram& nlohmann_json_t);
};
class Program {
private:
std::unordered_map<std::string, ExportedProgram> methods;
public:
const std::unordered_map<std::string, ExportedProgram>& get_methods() const {
return methods;
}
void set_methods(std::unordered_map<std::string, ExportedProgram> def) {
methods = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const Program& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, Program& nlohmann_json_t);
};
class Model {
private:
std::string name;
std::unordered_map<std::string, std::string> tensorPaths;
Program program;
std::unordered_map<std::string, Program> delegates;
std::unordered_map<std::string, std::string> deviceAllocationMap;
std::unordered_map<std::string, std::string> constantPaths;
public:
const std::string& get_name() const {
return name;
}
void set_name(std::string def) {
name = std::move(def);
}
const std::unordered_map<std::string, std::string>& get_tensorPaths() const {
return tensorPaths;
}
void set_tensorPaths(std::unordered_map<std::string, std::string> def) {
tensorPaths = std::move(def);
}
const Program& get_program() const {
return program;
}
void set_program(Program def) {
program = std::move(def);
}
const std::unordered_map<std::string, Program>& get_delegates() const {
return delegates;
}
void set_delegates(std::unordered_map<std::string, Program> def) {
delegates = std::move(def);
}
const std::unordered_map<std::string, std::string>& get_deviceAllocationMap() const {
return deviceAllocationMap;
}
void set_deviceAllocationMap(std::unordered_map<std::string, std::string> def) {
deviceAllocationMap = std::move(def);
}
const std::unordered_map<std::string, std::string>& get_constantPaths() const {
return constantPaths;
}
void set_constantPaths(std::unordered_map<std::string, std::string> def) {
constantPaths = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const Model& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, Model& nlohmann_json_t);
};
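// The remaining classes are auxiliary payloads: AOTInductorModelPickleData
// captures metadata stored alongside an AOTInductor-compiled library, and
// ExternKernelNode / ExternKernelNodes describe extern (fallback) kernel
// calls recorded by the compiler for later execution.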
class AOTInductorModelPickleData {
private:
std::string library_basename;
std::vector<std::string> input_names;
std::vector<std::string> output_names;
std::optional<int64_t> floating_point_input_dtype = std::nullopt;
std::optional<int64_t> floating_point_output_dtype = std::nullopt;
std::optional<bool> aot_inductor_model_is_cpu = std::nullopt;
public:
const std::string& get_library_basename() const {
return library_basename;
}
void set_library_basename(std::string def) {
library_basename = std::move(def);
}
const std::vector<std::string>& get_input_names() const {
return input_names;
}
void set_input_names(std::vector<std::string> def) {
input_names = std::move(def);
}
const std::vector<std::string>& get_output_names() const {
return output_names;
}
void set_output_names(std::vector<std::string> def) {
output_names = std::move(def);
}
const std::optional<int64_t>& get_floating_point_input_dtype() const {
return floating_point_input_dtype;
}
void set_floating_point_input_dtype(std::optional<int64_t> def) {
floating_point_input_dtype = std::move(def);
}
const std::optional<int64_t>& get_floating_point_output_dtype() const {
return floating_point_output_dtype;
}
void set_floating_point_output_dtype(std::optional<int64_t> def) {
floating_point_output_dtype = std::move(def);
}
const std::optional<bool>& get_aot_inductor_model_is_cpu() const {
return aot_inductor_model_is_cpu;
}
void set_aot_inductor_model_is_cpu(std::optional<bool> def) {
aot_inductor_model_is_cpu = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const AOTInductorModelPickleData& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, AOTInductorModelPickleData& nlohmann_json_t);
};
class ExternKernelNode {
private:
std::string name;
Node node;
public:
const std::string& get_name() const {
return name;
}
void set_name(std::string def) {
name = std::move(def);
}
const Node& get_node() const {
return node;
}
void set_node(Node def) {
node = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const ExternKernelNode& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, ExternKernelNode& nlohmann_json_t);
};
class ExternKernelNodes {
private:
std::vector<ExternKernelNode> nodes;
public:
const std::vector<ExternKernelNode>& get_nodes() const {
return nodes;
}
void set_nodes(std::vector<ExternKernelNode> def) {
nodes = std::move(def);
}
friend void to_json(nlohmann::json& nlohmann_json_j, const ExternKernelNodes& nlohmann_json_t);
friend void from_json(const nlohmann::json& nlohmann_json_j, ExternKernelNodes& nlohmann_json_t);
};
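// The inline to_json/from_json definitions below implement the friend
// declarations made inside each class, using nlohmann::json's ADL
// serialization hooks. Each from_json reads fields with
// json.value(key, default) against a default-constructed object, so missing
// keys fall back to the field's default instead of throwing, which lets newer
// readers accept older payloads.
//
// Round-trip sketch (illustrative only; relies on the nlohmann/json include
// already required by this header), given a JSON string `payload`:
//   ExportedProgram ep = nlohmann::json::parse(payload).get<ExportedProgram>();
//   std::string out = nlohmann::json(ep).dump();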
inline void to_json(nlohmann::json& nlohmann_json_j, const AOTInductorModelPickleData& nlohmann_json_t) {
nlohmann_json_j["library_basename"] = nlohmann_json_t.library_basename;
nlohmann_json_j["input_names"] = nlohmann_json_t.input_names;
nlohmann_json_j["output_names"] = nlohmann_json_t.output_names;
nlohmann_json_j["floating_point_input_dtype"] = nlohmann_json_t.floating_point_input_dtype;
nlohmann_json_j["floating_point_output_dtype"] = nlohmann_json_t.floating_point_output_dtype;
nlohmann_json_j["aot_inductor_model_is_cpu"] = nlohmann_json_t.aot_inductor_model_is_cpu;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, AOTInductorModelPickleData& nlohmann_json_t) {
AOTInductorModelPickleData nlohmann_json_default_obj;
nlohmann_json_t.library_basename = nlohmann_json_j.value("library_basename", nlohmann_json_default_obj.library_basename);
nlohmann_json_t.input_names = nlohmann_json_j.value("input_names", nlohmann_json_default_obj.input_names);
nlohmann_json_t.output_names = nlohmann_json_j.value("output_names", nlohmann_json_default_obj.output_names);
nlohmann_json_t.floating_point_input_dtype = nlohmann_json_j.value("floating_point_input_dtype", nlohmann_json_default_obj.floating_point_input_dtype);
nlohmann_json_t.floating_point_output_dtype = nlohmann_json_j.value("floating_point_output_dtype", nlohmann_json_default_obj.floating_point_output_dtype);
nlohmann_json_t.aot_inductor_model_is_cpu = nlohmann_json_j.value("aot_inductor_model_is_cpu", nlohmann_json_default_obj.aot_inductor_model_is_cpu);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const BufferMutationSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
nlohmann_json_j["buffer_name"] = nlohmann_json_t.buffer_name;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, BufferMutationSpec& nlohmann_json_t) {
BufferMutationSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
nlohmann_json_t.buffer_name = nlohmann_json_j.value("buffer_name", nlohmann_json_default_obj.buffer_name);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const CustomObjArgument& nlohmann_json_t) {
nlohmann_json_j["name"] = nlohmann_json_t.name;
nlohmann_json_j["class_fqn"] = nlohmann_json_t.class_fqn;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, CustomObjArgument& nlohmann_json_t) {
CustomObjArgument nlohmann_json_default_obj;
nlohmann_json_t.name = nlohmann_json_j.value("name", nlohmann_json_default_obj.name);
nlohmann_json_t.class_fqn = nlohmann_json_j.value("class_fqn", nlohmann_json_default_obj.class_fqn);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const Device& nlohmann_json_t) {
nlohmann_json_j["type"] = nlohmann_json_t.type;
nlohmann_json_j["index"] = nlohmann_json_t.index;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, Device& nlohmann_json_t) {
Device nlohmann_json_default_obj;
nlohmann_json_t.type = nlohmann_json_j.value("type", nlohmann_json_default_obj.type);
nlohmann_json_t.index = nlohmann_json_j.value("index", nlohmann_json_default_obj.index);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const ExportedProgram& nlohmann_json_t) {
nlohmann_json_j["graph_module"] = nlohmann_json_t.graph_module;
nlohmann_json_j["opset_version"] = nlohmann_json_t.opset_version;
nlohmann_json_j["range_constraints"] = nlohmann_json_t.range_constraints;
nlohmann_json_j["schema_version"] = nlohmann_json_t.schema_version;
nlohmann_json_j["verifiers"] = nlohmann_json_t.verifiers;
nlohmann_json_j["torch_version"] = nlohmann_json_t.torch_version;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, ExportedProgram& nlohmann_json_t) {
ExportedProgram nlohmann_json_default_obj;
nlohmann_json_t.graph_module = nlohmann_json_j.value("graph_module", nlohmann_json_default_obj.graph_module);
nlohmann_json_t.opset_version = nlohmann_json_j.value("opset_version", nlohmann_json_default_obj.opset_version);
nlohmann_json_t.range_constraints = nlohmann_json_j.value("range_constraints", nlohmann_json_default_obj.range_constraints);
nlohmann_json_t.schema_version = nlohmann_json_j.value("schema_version", nlohmann_json_default_obj.schema_version);
nlohmann_json_t.verifiers = nlohmann_json_j.value("verifiers", nlohmann_json_default_obj.verifiers);
nlohmann_json_t.torch_version = nlohmann_json_j.value("torch_version", nlohmann_json_default_obj.torch_version);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const ExternKernelNode& nlohmann_json_t) {
nlohmann_json_j["name"] = nlohmann_json_t.name;
nlohmann_json_j["node"] = nlohmann_json_t.node;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, ExternKernelNode& nlohmann_json_t) {
ExternKernelNode nlohmann_json_default_obj;
nlohmann_json_t.name = nlohmann_json_j.value("name", nlohmann_json_default_obj.name);
nlohmann_json_t.node = nlohmann_json_j.value("node", nlohmann_json_default_obj.node);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const ExternKernelNodes& nlohmann_json_t) {
nlohmann_json_j["nodes"] = nlohmann_json_t.nodes;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, ExternKernelNodes& nlohmann_json_t) {
ExternKernelNodes nlohmann_json_default_obj;
nlohmann_json_t.nodes = nlohmann_json_j.value("nodes", nlohmann_json_default_obj.nodes);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const GradientToParameterSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
nlohmann_json_j["parameter_name"] = nlohmann_json_t.parameter_name;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, GradientToParameterSpec& nlohmann_json_t) {
GradientToParameterSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
nlohmann_json_t.parameter_name = nlohmann_json_j.value("parameter_name", nlohmann_json_default_obj.parameter_name);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const GradientToUserInputSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
nlohmann_json_j["user_input_name"] = nlohmann_json_t.user_input_name;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, GradientToUserInputSpec& nlohmann_json_t) {
GradientToUserInputSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
nlohmann_json_t.user_input_name = nlohmann_json_j.value("user_input_name", nlohmann_json_default_obj.user_input_name);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const Graph& nlohmann_json_t) {
nlohmann_json_j["inputs"] = nlohmann_json_t.inputs;
nlohmann_json_j["outputs"] = nlohmann_json_t.outputs;
nlohmann_json_j["nodes"] = nlohmann_json_t.nodes;
nlohmann_json_j["tensor_values"] = nlohmann_json_t.tensor_values;
nlohmann_json_j["sym_int_values"] = nlohmann_json_t.sym_int_values;
nlohmann_json_j["sym_bool_values"] = nlohmann_json_t.sym_bool_values;
nlohmann_json_j["is_single_tensor_return"] = nlohmann_json_t.is_single_tensor_return;
nlohmann_json_j["custom_obj_values"] = nlohmann_json_t.custom_obj_values;
nlohmann_json_j["sym_float_values"] = nlohmann_json_t.sym_float_values;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, Graph& nlohmann_json_t) {
Graph nlohmann_json_default_obj;
nlohmann_json_t.inputs = nlohmann_json_j.value("inputs", nlohmann_json_default_obj.inputs);
nlohmann_json_t.outputs = nlohmann_json_j.value("outputs", nlohmann_json_default_obj.outputs);
nlohmann_json_t.nodes = nlohmann_json_j.value("nodes", nlohmann_json_default_obj.nodes);
nlohmann_json_t.tensor_values = nlohmann_json_j.value("tensor_values", nlohmann_json_default_obj.tensor_values);
nlohmann_json_t.sym_int_values = nlohmann_json_j.value("sym_int_values", nlohmann_json_default_obj.sym_int_values);
nlohmann_json_t.sym_bool_values = nlohmann_json_j.value("sym_bool_values", nlohmann_json_default_obj.sym_bool_values);
nlohmann_json_t.is_single_tensor_return = nlohmann_json_j.value("is_single_tensor_return", nlohmann_json_default_obj.is_single_tensor_return);
nlohmann_json_t.custom_obj_values = nlohmann_json_j.value("custom_obj_values", nlohmann_json_default_obj.custom_obj_values);
nlohmann_json_t.sym_float_values = nlohmann_json_j.value("sym_float_values", nlohmann_json_default_obj.sym_float_values);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const GraphArgument& nlohmann_json_t) {
nlohmann_json_j["name"] = nlohmann_json_t.name;
nlohmann_json_j["graph"] = nlohmann_json_t.graph;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, GraphArgument& nlohmann_json_t) {
GraphArgument nlohmann_json_default_obj;
nlohmann_json_t.name = nlohmann_json_j.value("name", nlohmann_json_default_obj.name);
nlohmann_json_t.graph = nlohmann_json_j.value("graph", nlohmann_json_default_obj.graph);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const GraphModule& nlohmann_json_t) {
nlohmann_json_j["graph"] = nlohmann_json_t.graph;
nlohmann_json_j["signature"] = nlohmann_json_t.signature;
nlohmann_json_j["module_call_graph"] = nlohmann_json_t.module_call_graph;
nlohmann_json_j["metadata"] = nlohmann_json_t.metadata;
nlohmann_json_j["treespec_namedtuple_fields"] = nlohmann_json_t.treespec_namedtuple_fields;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, GraphModule& nlohmann_json_t) {
GraphModule nlohmann_json_default_obj;
nlohmann_json_t.graph = nlohmann_json_j.value("graph", nlohmann_json_default_obj.graph);
nlohmann_json_t.signature = nlohmann_json_j.value("signature", nlohmann_json_default_obj.signature);
nlohmann_json_t.module_call_graph = nlohmann_json_j.value("module_call_graph", nlohmann_json_default_obj.module_call_graph);
nlohmann_json_t.metadata = nlohmann_json_j.value("metadata", nlohmann_json_default_obj.metadata);
nlohmann_json_t.treespec_namedtuple_fields = nlohmann_json_j.value("treespec_namedtuple_fields", nlohmann_json_default_obj.treespec_namedtuple_fields);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const GraphSignature& nlohmann_json_t) {
nlohmann_json_j["input_specs"] = nlohmann_json_t.input_specs;
nlohmann_json_j["output_specs"] = nlohmann_json_t.output_specs;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, GraphSignature& nlohmann_json_t) {
GraphSignature nlohmann_json_default_obj;
nlohmann_json_t.input_specs = nlohmann_json_j.value("input_specs", nlohmann_json_default_obj.input_specs);
nlohmann_json_t.output_specs = nlohmann_json_j.value("output_specs", nlohmann_json_default_obj.output_specs);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const InputToBufferSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
nlohmann_json_j["buffer_name"] = nlohmann_json_t.buffer_name;
nlohmann_json_j["persistent"] = nlohmann_json_t.persistent;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, InputToBufferSpec& nlohmann_json_t) {
InputToBufferSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
nlohmann_json_t.buffer_name = nlohmann_json_j.value("buffer_name", nlohmann_json_default_obj.buffer_name);
nlohmann_json_t.persistent = nlohmann_json_j.value("persistent", nlohmann_json_default_obj.persistent);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const InputToConstantInputSpec& nlohmann_json_t) {
nlohmann_json_j["name"] = nlohmann_json_t.name;
nlohmann_json_j["value"] = nlohmann_json_t.value;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, InputToConstantInputSpec& nlohmann_json_t) {
InputToConstantInputSpec nlohmann_json_default_obj;
nlohmann_json_t.name = nlohmann_json_j.value("name", nlohmann_json_default_obj.name);
nlohmann_json_t.value = nlohmann_json_j.value("value", nlohmann_json_default_obj.value);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const InputToCustomObjSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
nlohmann_json_j["custom_obj_name"] = nlohmann_json_t.custom_obj_name;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, InputToCustomObjSpec& nlohmann_json_t) {
InputToCustomObjSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
nlohmann_json_t.custom_obj_name = nlohmann_json_j.value("custom_obj_name", nlohmann_json_default_obj.custom_obj_name);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const InputToParameterSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
nlohmann_json_j["parameter_name"] = nlohmann_json_t.parameter_name;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, InputToParameterSpec& nlohmann_json_t) {
InputToParameterSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
nlohmann_json_t.parameter_name = nlohmann_json_j.value("parameter_name", nlohmann_json_default_obj.parameter_name);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const InputToTensorConstantSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
nlohmann_json_j["tensor_constant_name"] = nlohmann_json_t.tensor_constant_name;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, InputToTensorConstantSpec& nlohmann_json_t) {
InputToTensorConstantSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
nlohmann_json_t.tensor_constant_name = nlohmann_json_j.value("tensor_constant_name", nlohmann_json_default_obj.tensor_constant_name);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const InputTokenSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, InputTokenSpec& nlohmann_json_t) {
InputTokenSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const LossOutputSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, LossOutputSpec& nlohmann_json_t) {
LossOutputSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const Model& nlohmann_json_t) {
nlohmann_json_j["name"] = nlohmann_json_t.name;
nlohmann_json_j["tensorPaths"] = nlohmann_json_t.tensorPaths;
nlohmann_json_j["program"] = nlohmann_json_t.program;
nlohmann_json_j["delegates"] = nlohmann_json_t.delegates;
nlohmann_json_j["deviceAllocationMap"] = nlohmann_json_t.deviceAllocationMap;
nlohmann_json_j["constantPaths"] = nlohmann_json_t.constantPaths;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, Model& nlohmann_json_t) {
Model nlohmann_json_default_obj;
nlohmann_json_t.name = nlohmann_json_j.value("name", nlohmann_json_default_obj.name);
nlohmann_json_t.tensorPaths = nlohmann_json_j.value("tensorPaths", nlohmann_json_default_obj.tensorPaths);
nlohmann_json_t.program = nlohmann_json_j.value("program", nlohmann_json_default_obj.program);
nlohmann_json_t.delegates = nlohmann_json_j.value("delegates", nlohmann_json_default_obj.delegates);
nlohmann_json_t.deviceAllocationMap = nlohmann_json_j.value("deviceAllocationMap", nlohmann_json_default_obj.deviceAllocationMap);
nlohmann_json_t.constantPaths = nlohmann_json_j.value("constantPaths", nlohmann_json_default_obj.constantPaths);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const ModuleCallEntry& nlohmann_json_t) {
nlohmann_json_j["fqn"] = nlohmann_json_t.fqn;
nlohmann_json_j["signature"] = nlohmann_json_t.signature;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, ModuleCallEntry& nlohmann_json_t) {
ModuleCallEntry nlohmann_json_default_obj;
nlohmann_json_t.fqn = nlohmann_json_j.value("fqn", nlohmann_json_default_obj.fqn);
nlohmann_json_t.signature = nlohmann_json_j.value("signature", nlohmann_json_default_obj.signature);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const ModuleCallSignature& nlohmann_json_t) {
nlohmann_json_j["inputs"] = nlohmann_json_t.inputs;
nlohmann_json_j["outputs"] = nlohmann_json_t.outputs;
nlohmann_json_j["in_spec"] = nlohmann_json_t.in_spec;
nlohmann_json_j["out_spec"] = nlohmann_json_t.out_spec;
nlohmann_json_j["forward_arg_names"] = nlohmann_json_t.forward_arg_names;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, ModuleCallSignature& nlohmann_json_t) {
ModuleCallSignature nlohmann_json_default_obj;
nlohmann_json_t.inputs = nlohmann_json_j.value("inputs", nlohmann_json_default_obj.inputs);
nlohmann_json_t.outputs = nlohmann_json_j.value("outputs", nlohmann_json_default_obj.outputs);
nlohmann_json_t.in_spec = nlohmann_json_j.value("in_spec", nlohmann_json_default_obj.in_spec);
nlohmann_json_t.out_spec = nlohmann_json_j.value("out_spec", nlohmann_json_default_obj.out_spec);
nlohmann_json_t.forward_arg_names = nlohmann_json_j.value("forward_arg_names", nlohmann_json_default_obj.forward_arg_names);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const NamedArgument& nlohmann_json_t) {
nlohmann_json_j["name"] = nlohmann_json_t.name;
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
nlohmann_json_j["kind"] = nlohmann_json_t.kind;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, NamedArgument& nlohmann_json_t) {
NamedArgument nlohmann_json_default_obj;
nlohmann_json_t.name = nlohmann_json_j.value("name", nlohmann_json_default_obj.name);
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
nlohmann_json_t.kind = nlohmann_json_j.value("kind", nlohmann_json_default_obj.kind);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const NamedTupleDef& nlohmann_json_t) {
nlohmann_json_j["field_names"] = nlohmann_json_t.field_names;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, NamedTupleDef& nlohmann_json_t) {
NamedTupleDef nlohmann_json_default_obj;
nlohmann_json_t.field_names = nlohmann_json_j.value("field_names", nlohmann_json_default_obj.field_names);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const Node& nlohmann_json_t) {
nlohmann_json_j["target"] = nlohmann_json_t.target;
nlohmann_json_j["inputs"] = nlohmann_json_t.inputs;
nlohmann_json_j["outputs"] = nlohmann_json_t.outputs;
nlohmann_json_j["metadata"] = nlohmann_json_t.metadata;
nlohmann_json_j["is_hop_single_tensor_return"] = nlohmann_json_t.is_hop_single_tensor_return;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, Node& nlohmann_json_t) {
Node nlohmann_json_default_obj;
nlohmann_json_t.target = nlohmann_json_j.value("target", nlohmann_json_default_obj.target);
nlohmann_json_t.inputs = nlohmann_json_j.value("inputs", nlohmann_json_default_obj.inputs);
nlohmann_json_t.outputs = nlohmann_json_j.value("outputs", nlohmann_json_default_obj.outputs);
nlohmann_json_t.metadata = nlohmann_json_j.value("metadata", nlohmann_json_default_obj.metadata);
nlohmann_json_t.is_hop_single_tensor_return = nlohmann_json_j.value("is_hop_single_tensor_return", nlohmann_json_default_obj.is_hop_single_tensor_return);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const OutputTokenSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, OutputTokenSpec& nlohmann_json_t) {
OutputTokenSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const Program& nlohmann_json_t) {
nlohmann_json_j["methods"] = nlohmann_json_t.methods;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, Program& nlohmann_json_t) {
Program nlohmann_json_default_obj;
nlohmann_json_t.methods = nlohmann_json_j.value("methods", nlohmann_json_default_obj.methods);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const RangeConstraint& nlohmann_json_t) {
nlohmann_json_j["min_val"] = nlohmann_json_t.min_val;
nlohmann_json_j["max_val"] = nlohmann_json_t.max_val;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, RangeConstraint& nlohmann_json_t) {
RangeConstraint nlohmann_json_default_obj;
nlohmann_json_t.min_val = nlohmann_json_j.value("min_val", nlohmann_json_default_obj.min_val);
nlohmann_json_t.max_val = nlohmann_json_j.value("max_val", nlohmann_json_default_obj.max_val);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const SchemaVersion& nlohmann_json_t) {
nlohmann_json_j["major"] = nlohmann_json_t.major;
nlohmann_json_j["minor"] = nlohmann_json_t.minor;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, SchemaVersion& nlohmann_json_t) {
SchemaVersion nlohmann_json_default_obj;
nlohmann_json_t.major = nlohmann_json_j.value("major", nlohmann_json_default_obj.major);
nlohmann_json_t.minor = nlohmann_json_j.value("minor", nlohmann_json_default_obj.minor);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const SymExpr& nlohmann_json_t) {
nlohmann_json_j["expr_str"] = nlohmann_json_t.expr_str;
nlohmann_json_j["hint"] = nlohmann_json_t.hint;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, SymExpr& nlohmann_json_t) {
SymExpr nlohmann_json_default_obj;
nlohmann_json_t.expr_str = nlohmann_json_j.value("expr_str", nlohmann_json_default_obj.expr_str);
nlohmann_json_t.hint = nlohmann_json_j.value("hint", nlohmann_json_default_obj.hint);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const TensorArgument& nlohmann_json_t) {
nlohmann_json_j["name"] = nlohmann_json_t.name;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, TensorArgument& nlohmann_json_t) {
TensorArgument nlohmann_json_default_obj;
nlohmann_json_t.name = nlohmann_json_j.value("name", nlohmann_json_default_obj.name);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const TensorMeta& nlohmann_json_t) {
nlohmann_json_j["dtype"] = nlohmann_json_t.dtype;
nlohmann_json_j["sizes"] = nlohmann_json_t.sizes;
nlohmann_json_j["requires_grad"] = nlohmann_json_t.requires_grad;
nlohmann_json_j["device"] = nlohmann_json_t.device;
nlohmann_json_j["strides"] = nlohmann_json_t.strides;
nlohmann_json_j["storage_offset"] = nlohmann_json_t.storage_offset;
nlohmann_json_j["layout"] = nlohmann_json_t.layout;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, TensorMeta& nlohmann_json_t) {
TensorMeta nlohmann_json_default_obj;
nlohmann_json_t.dtype = nlohmann_json_j.value("dtype", nlohmann_json_default_obj.dtype);
nlohmann_json_t.sizes = nlohmann_json_j.value("sizes", nlohmann_json_default_obj.sizes);
nlohmann_json_t.requires_grad = nlohmann_json_j.value("requires_grad", nlohmann_json_default_obj.requires_grad);
nlohmann_json_t.device = nlohmann_json_j.value("device", nlohmann_json_default_obj.device);
nlohmann_json_t.strides = nlohmann_json_j.value("strides", nlohmann_json_default_obj.strides);
nlohmann_json_t.storage_offset = nlohmann_json_j.value("storage_offset", nlohmann_json_default_obj.storage_offset);
nlohmann_json_t.layout = nlohmann_json_j.value("layout", nlohmann_json_default_obj.layout);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const TokenArgument& nlohmann_json_t) {
nlohmann_json_j["name"] = nlohmann_json_t.name;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, TokenArgument& nlohmann_json_t) {
TokenArgument nlohmann_json_default_obj;
nlohmann_json_t.name = nlohmann_json_j.value("name", nlohmann_json_default_obj.name);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const UserInputMutationSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
nlohmann_json_j["user_input_name"] = nlohmann_json_t.user_input_name;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, UserInputMutationSpec& nlohmann_json_t) {
UserInputMutationSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
nlohmann_json_t.user_input_name = nlohmann_json_j.value("user_input_name", nlohmann_json_default_obj.user_input_name);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const UserInputSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, UserInputSpec& nlohmann_json_t) {
UserInputSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
}
inline void to_json(nlohmann::json& nlohmann_json_j, const UserOutputSpec& nlohmann_json_t) {
nlohmann_json_j["arg"] = nlohmann_json_t.arg;
}
inline void from_json(const nlohmann::json& nlohmann_json_j, UserOutputSpec& nlohmann_json_t) {
UserOutputSpec nlohmann_json_default_obj;
nlohmann_json_t.arg = nlohmann_json_j.value("arg", nlohmann_json_default_obj.arg);
}
} // namespace _export
} // namespace torch
// clang-format on
```
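The `to_json`/`from_json` pairs above are found by `nlohmann::json` through argument-dependent lookup, so these structs convert to and from JSON with no extra glue. Below is a minimal round-trip sketch (not part of the file), assuming the header that declares `torch::_export::Device` is available and that its `type`/`index` members are directly assignable, as the member access in the serializers implies.

```cpp
// Sketch only: round-trip one of the serialized structs through nlohmann::json.
// #include the generated header that declares torch::_export::Device (path omitted).
#include <nlohmann/json.hpp>
#include <iostream>

void device_roundtrip() {
  torch::_export::Device dev;
  dev.type = "cuda";
  dev.index = 0;

  nlohmann::json j = dev;                       // dispatches to to_json via ADL
  auto back = j.get<torch::_export::Device>();  // dispatches to from_json
  std::cout << j.dump() << '\n';                // e.g. {"index":0,"type":"cuda"}
}
```

Note that with the default object type, `nlohmann::json::dump()` emits keys in alphabetical order, so the printed text need not match field declaration order.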
|
=============================================================================================================================
SOURCE CODE FILE: init.h
LINES: 1
SIZE: 0.20 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\init.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/pybind.h>
namespace torch::throughput_benchmark {
void initThroughputBenchmarkBindings(PyObject* module);
} // namespace torch::throughput_benchmark
```
|
==========================================================================================================================================
SOURCE CODE FILE: invalid_arguments.h
LINES: 1
SIZE: 0.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\invalid_arguments.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
#include <string>
#include <vector>
namespace torch {
std::string format_invalid_args(
PyObject* given_args,
PyObject* given_kwargs,
const std::string& function_name,
const std::vector<std::string>& options);
} // namespace torch
```
|
===============================================================================================================================
SOURCE CODE FILE: nested.h
LINES: 1
SIZE: 0.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\nested.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/python_arg_parser.h>
#include <ATen/core/Tensor.h>
namespace torch::utils {
at::Tensor nested_tensor_ctor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PythonArgs& r);
} // namespace torch::utils
```
|
===================================================================================================================================
SOURCE CODE FILE: numpy_stub.h
LINES: 1
SIZE: 0.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\numpy_stub.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
#ifdef USE_NUMPY
#if !defined(NO_IMPORT_ARRAY) && !defined(WITH_NUMPY_IMPORT_ARRAY)
#define NO_IMPORT_ARRAY
#endif
#ifndef PY_ARRAY_UNIQUE_SYMBOL
#define PY_ARRAY_UNIQUE_SYMBOL __numpy_array_api
#endif
#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#endif
#include <numpy/arrayobject.h>
#endif // USE_NUMPY
```
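Editorial note, not part of the header: the `PY_ARRAY_UNIQUE_SYMBOL`/`NO_IMPORT_ARRAY` pair is the standard NumPy C-API mechanism for sharing one API table across translation units. Exactly one translation unit defines `WITH_NUMPY_IMPORT_ARRAY` before including this stub (so it does not receive `NO_IMPORT_ARRAY`) and calls `import_array()` during module initialization; every other consumer just includes the stub and resolves `PyArray_*` calls through the shared symbol. A hedged sketch with hypothetical file names:

```cpp
// Sketch of the intended usage pattern (file names below are hypothetical).
//
// numpy_init.cpp -- the single TU that owns the NumPy API table:
//   #define WITH_NUMPY_IMPORT_ARRAY
//   #include <torch/csrc/utils/numpy_stub.h>
//   // In the module init code, after the interpreter is up:
//   //   import_array();   // populates the __numpy_array_api table
//
// everywhere_else.cpp -- ordinary consumers of the NumPy C API:
//   #include <torch/csrc/utils/numpy_stub.h>  // gets NO_IMPORT_ARRAY
//   // PyArray_* calls resolve through the shared __numpy_array_api symbol.
```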
|
===================================================================================================================================
SOURCE CODE FILE: object_ptr.h
LINES: 1
SIZE: 2.03 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\object_ptr.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <utility>
template <class T>
class TORCH_PYTHON_API THPPointer {
public:
THPPointer() : ptr(nullptr) {}
explicit THPPointer(T* ptr) noexcept : ptr(ptr) {}
THPPointer(THPPointer&& p) noexcept : ptr(std::exchange(p.ptr, nullptr)) {}
THPPointer(const THPPointer& p) = delete;
THPPointer& operator=(const THPPointer&) = delete;
~THPPointer() {
free();
}
T* get() {
return ptr;
}
const T* get() const {
return ptr;
}
THPPointer dup() const {
return dup(ptr);
}
static THPPointer dup(const T* ptr) {
Py_XINCREF(ptr);
return THPPointer(
const_cast<T*>(ptr)); // NOLINT(cppcoreguidelines-pro-type-const-cast)
}
static THPPointer none() {
Py_INCREF(Py_None);
return THPPointer(reinterpret_cast<T*>(Py_None));
}
T* release() {
T* tmp = ptr;
ptr = nullptr;
return tmp;
}
operator T*() {
return ptr;
}
THPPointer& operator=(T* new_ptr) noexcept {
free();
ptr = new_ptr;
return *this;
}
THPPointer& operator=(THPPointer&& p) noexcept {
free();
ptr = p.ptr;
p.ptr = nullptr;
return *this;
}
T* operator->() {
return ptr;
}
explicit operator bool() const {
return ptr != nullptr;
}
private:
void free();
T* ptr = nullptr;
};
/**
* An RAII-style, owning pointer to a PyObject. You must protect
* destruction of this object with the GIL.
*
* WARNING: Think twice before putting this as a field in a C++
* struct. This class does NOT take out the GIL on destruction,
 * so you will need to ensure that the destructor of your struct
* is either (a) always invoked when the GIL is taken or (b) takes
* out the GIL itself. Easiest way to avoid this problem is to
* not use THPPointer in this situation.
*/
using THPObjectPtr = THPPointer<PyObject>;
using THPCodeObjectPtr = THPPointer<PyCodeObject>;
using THPFrameObjectPtr = THPPointer<PyFrameObject>;
```
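A hedged usage sketch (not part of the header): `THPPointer` owns one reference and drops it in `free()` on destruction or reassignment, so it pairs naturally with C-API calls that return new references. Per the warning above, the destructor must run while the GIL is held; the sketch assumes its caller already holds it.

```cpp
// Sketch only. Assumes the calling thread already holds the GIL.
#include <torch/csrc/utils/object_ptr.h>

bool has_attr_foo(PyObject* obj) {
  // PyObject_GetAttrString returns a new reference (or nullptr on error);
  // THPObjectPtr releases that reference automatically at scope exit.
  THPObjectPtr attr(PyObject_GetAttrString(obj, "foo"));
  if (!attr) {
    PyErr_Clear();  // swallow the AttributeError for this sketch
    return false;
  }
  return true;
}
```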
|
==================================================================================================================================
SOURCE CODE FILE: out_types.h
LINES: 1
SIZE: 0.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\out_types.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Tensor.h>
namespace torch::utils {
TORCH_API void check_out_type_matches(
const at::Tensor& result,
std::optional<at::ScalarType> scalarType,
bool scalarType_is_none,
std::optional<at::Layout> layout,
std::optional<at::Device> device,
bool device_is_none);
}
```
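A hedged call sketch (the enforcement logic lives in the corresponding .cpp, not shown here): the declaration suggests the function validates an explicit `out=` tensor against the dtype, layout, and device a caller requested, with the `*_is_none` flags signalling that the corresponding argument was omitted. `validate_out` below is a hypothetical caller.

```cpp
// Sketch only: check that an `out=` tensor matches the requested options.
#include <torch/csrc/utils/out_types.h>

void validate_out(const at::Tensor& out) {
  torch::utils::check_out_type_matches(
      out,
      at::kFloat,
      /*scalarType_is_none=*/false,
      at::kStrided,
      at::Device(at::kCPU),
      /*device_is_none=*/false);
}
```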
|
===============================================================================================================================
SOURCE CODE FILE: pybind.h
LINES: 1
SIZE: 13.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\pybind.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pythoncapi_compat.h>
#include <ATen/core/Tensor.h>
#include <ATen/core/jit_type_base.h>
#include <c10/util/irange.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <torch/csrc/Device.h>
#include <torch/csrc/Dtype.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Generator.h>
#include <torch/csrc/MemoryFormat.h>
#include <torch/csrc/Stream.h>
#include <torch/csrc/utils/tensor_memoryformats.h>
namespace py = pybind11;
#define IS_PYBIND_2_13_PLUS PYBIND11_VERSION_HEX >= 0x020D0000
// This makes intrusive_ptr to be available as a custom pybind11 holder type,
// see
// https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#custom-smart-pointers
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr<T>, true)
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr<T>)
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr<T>, true)
namespace pybind11::detail {
// torch.Tensor <-> at::Tensor conversions (without unwrapping)
template <>
struct TORCH_PYTHON_API type_caster<at::Tensor> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(at::Tensor, _("torch.Tensor"));
bool load(handle src, bool);
static handle cast(
const at::Tensor& src,
return_value_policy /* policy */,
handle /* parent */);
};
// torch._StorageBase <-> at::Storage
template <>
struct type_caster<at::Storage> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(at::Storage, _("torch.StorageBase"));
bool load(handle src, bool) {
PyObject* obj = src.ptr();
if (torch::isStorage(obj)) {
value = torch::createStorage(obj);
return true;
}
return false;
}
static handle cast(
const at::Storage& src,
return_value_policy /* policy */,
handle /* parent */) {
return handle(torch::createPyObject(src));
}
};
template <>
struct type_caster<at::Generator> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(at::Generator, _("torch.Generator"));
bool load(handle src, bool) {
PyObject* obj = src.ptr();
if (THPGenerator_Check(obj)) {
value = reinterpret_cast<THPGenerator*>(obj)->cdata;
return true;
}
return false;
}
static handle cast(
const at::Generator& src,
return_value_policy /* policy */,
handle /* parent */) {
return handle(THPGenerator_Wrap(src));
}
};
template <>
struct TORCH_PYTHON_API type_caster<at::IntArrayRef> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(at::IntArrayRef, _("Tuple[int, ...]"));
bool load(handle src, bool);
static handle cast(
at::IntArrayRef src,
return_value_policy /* policy */,
handle /* parent */);
private:
std::vector<int64_t> v_value;
};
template <>
struct TORCH_PYTHON_API type_caster<at::SymIntArrayRef> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(at::SymIntArrayRef, _("List[int]"));
bool load(handle src, bool);
static handle cast(
at::SymIntArrayRef src,
return_value_policy /* policy */,
handle /* parent */);
private:
std::vector<c10::SymInt> v_value;
};
template <>
struct TORCH_PYTHON_API type_caster<at::ArrayRef<c10::SymNode>> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(at::ArrayRef<c10::SymNode>, _("List[SymNode]"));
bool load(handle src, bool);
static handle cast(
at::ArrayRef<c10::SymNode> src,
return_value_policy /* policy */,
handle /* parent */);
private:
std::vector<c10::SymNode> v_value;
};
template <>
struct type_caster<at::MemoryFormat> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(at::MemoryFormat, _("torch.memory_format"));
bool load(handle src, bool) {
PyObject* obj = src.ptr();
if (THPMemoryFormat_Check(obj)) {
value = reinterpret_cast<THPMemoryFormat*>(obj)->memory_format;
return true;
}
return false;
}
static handle cast(
at::MemoryFormat src,
return_value_policy /* policy */,
handle /* parent */) {
return handle(Py_NewRef(torch::utils::getTHPMemoryFormat(src)));
}
};
template <>
struct type_caster<at::Device> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(at::Device, _("torch.device"));
// PYBIND11_TYPE_CASTER defines a member field called value. Since at::Device
// cannot be default-initialized, we provide this constructor to explicitly
// initialize that field. The value doesn't matter as it will be overwritten
// after a successful call to load.
type_caster() : value(c10::kCPU) {}
bool load(handle src, bool) {
PyObject* obj = src.ptr();
if (THPDevice_Check(obj)) {
value = reinterpret_cast<THPDevice*>(obj)->device;
return true;
}
return false;
}
static handle cast(
const at::Device& src,
return_value_policy /* policy */,
handle /* parent */) {
return handle(THPDevice_New(src));
}
};
template <>
struct type_caster<at::ScalarType> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(at::ScalarType, _("torch.dtype"));
  // PYBIND11_TYPE_CASTER defines a member field called value. Since
  // at::ScalarType cannot be default-initialized, we provide this constructor to explicitly
// initialize that field. The value doesn't matter as it will be overwritten
// after a successful call to load.
type_caster() : value(at::kFloat) {}
bool load(handle src, bool) {
PyObject* obj = src.ptr();
if (THPDtype_Check(obj)) {
value = reinterpret_cast<THPDtype*>(obj)->scalar_type;
return true;
}
return false;
}
static handle cast(
const at::ScalarType& src,
return_value_policy /* policy */,
handle /* parent */) {
return Py_NewRef(torch::getTHPDtype(src));
}
};
template <>
struct type_caster<c10::Stream> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(c10::Stream, _("torch.Stream"));
// PYBIND11_TYPE_CASTER defines a member field called value. Since c10::Stream
// cannot be default-initialized, we provide this constructor to explicitly
// initialize that field. The value doesn't matter as it will be overwritten
// after a successful call to load.
type_caster() : value(c10::Stream::DEFAULT, c10::Device(c10::kCPU, 0)) {}
bool load(handle src, bool) {
PyObject* obj = src.ptr();
if (THPStream_Check(obj)) {
value = c10::Stream::unpack3(
((THPStream*)obj)->stream_id,
static_cast<c10::DeviceIndex>(((THPStream*)obj)->device_index),
static_cast<c10::DeviceType>(((THPStream*)obj)->device_type));
return true;
}
return false;
}
static handle cast(
const c10::Stream& src,
return_value_policy /* policy */,
handle /* parent */) {
return handle(THPStream_Wrap(src));
}
};
template <>
struct type_caster<c10::DispatchKey>
: public type_caster_base<c10::DispatchKey> {
using base = type_caster_base<c10::DispatchKey>;
c10::DispatchKey tmp{};
public:
bool load(handle src, bool convert) {
if (base::load(src, convert)) {
return true;
} else if (py::isinstance(
src, py::module_::import("builtins").attr("str"))) {
tmp = c10::parseDispatchKey(py::cast<std::string>(src));
value = &tmp;
return true;
}
return false;
}
static handle cast(
c10::DispatchKey src,
return_value_policy policy,
handle parent) {
return base::cast(src, policy, parent);
}
};
template <>
struct TORCH_PYTHON_API type_caster<c10::Scalar> {
public:
PYBIND11_TYPE_CASTER(
c10::Scalar,
_("Union[Number, torch.SymInt, torch.SymFloat, torch.SymBool]"));
bool load(py::handle src, bool);
static py::handle cast(
const c10::Scalar& si,
return_value_policy /* policy */,
handle /* parent */);
};
template <>
struct TORCH_PYTHON_API type_caster<c10::SymInt> {
public:
PYBIND11_TYPE_CASTER(c10::SymInt, _("Union[int, torch.SymInt]"));
bool load(py::handle src, bool);
static py::handle cast(
const c10::SymInt& si,
return_value_policy /* policy */,
handle /* parent */);
};
template <>
struct TORCH_PYTHON_API type_caster<c10::SymFloat> {
public:
PYBIND11_TYPE_CASTER(c10::SymFloat, _("float"));
bool load(py::handle src, bool);
static py::handle cast(
const c10::SymFloat& si,
return_value_policy /* policy */,
handle /* parent */);
};
template <>
struct TORCH_PYTHON_API type_caster<c10::SymBool> {
public:
PYBIND11_TYPE_CASTER(c10::SymBool, _("Union[bool, torch.SymBool]"));
bool load(py::handle src, bool);
static py::handle cast(
const c10::SymBool& si,
return_value_policy /* policy */,
handle /* parent */);
};
template <typename T>
struct type_caster<c10::complex<T>> {
public:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(c10::complex<T>, _("complex"));
bool load(handle src, bool) {
PyObject* obj = src.ptr();
    // Referred from `THPUtils_unpackComplexDouble`
Py_complex py_complex = PyComplex_AsCComplex(obj);
if (py_complex.real == -1.0 && PyErr_Occurred()) {
return false;
}
// Python's Complex is always double precision.
value = c10::complex<double>(py_complex.real, py_complex.imag);
return true;
}
static handle cast(
const c10::complex<T>& complex,
return_value_policy /* policy */,
handle /* parent */) {
// Python only knows double precision complex.
return handle(PyComplex_FromDoubles(complex.real(), complex.imag()));
}
};
} // namespace pybind11::detail
namespace torch::impl {
// Use this function if you have a C++ object that is used from both C++
// and Python contexts, and you need its GIL to be released when you
// destruct it in the Python context.
//
// This function is a valid shared_ptr destructor and can be used to
// conveniently allocate a shared_ptr to an object whose destructor will be run
// without the GIL. Pass it as the second argument to shared_ptr, e.g.,
//
// shared_ptr<T>(new T(), destroy_without_gil<T>)
//
// Attaching the GIL release logic to the holder pointer rather than the
// actual destructor of T is helpful when T is Python-agnostic and
// shouldn't refer to the Python API.
//
// Note there are limitations to the correctness of code that makes use of this.
// In particular, if a shared_ptr is constructed from C++ code without this
// destructor and then passed to pybind11, pybind11 will happily take ownership
// of the shared_ptr (and be willing to destruct it from a context where it is
// holding the GIL). A unique_ptr with a type-branded deleter is less prone to
// this problem, because a unique_ptr with the stock deleter is not convertible
// to it. I plan to mitigate this problem by adding DEBUG-only asserts to the
// true C++ destructors that the GIL is not held (using a virtual call to get
// to the Python interpreter); alternatively, we could use a virtual call to
// simply ensure we release the GIL in the C++ destructor, however, that is a
// layering violation (why would code that is ostensibly Python-agnostic be
// calling into the GIL?).
//
// Adapted from
// https://github.com/pybind/pybind11/issues/1446#issuecomment-406341510
template <typename T>
inline void destroy_without_gil(T* ptr) {
// Because the ownership of a shared_ptr is diffuse, it's not possible to
// necessarily predict whether or not the last reference to an object will
// be destructed from Python or C++. This means that in the destructor here,
// we don't necessarily know if we actually have the GIL or not; in fact,
// we don't even know if the Python interpreter still exists! Thus, we have
// to test for it before releasing the GIL.
//
// PyGILState_Check is hopefully self explanatory. But Py_IsInitialized or
// _PyIsFinalizing? Both get set at the same time during the Python
// destruction process:
// https://github.com/python/cpython/blob/d92513390a1a0da781bb08c284136f4d7abea36d/Python/pylifecycle.c#L1716-L1717
  // so the operative question is whether or not you want to release the GIL after
// finalization has completed (and there is just no Python interpreter).
// Clearly there is no need to release GIL in that state, so we want
// Py_IsInitialized.
if (Py_IsInitialized() && PyGILState_Check()) {
pybind11::gil_scoped_release nogil;
delete ptr;
} else {
delete ptr;
}
}
} // namespace torch::impl
```
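A hedged sketch of the `shared_ptr` pattern described in the comment above: attach `destroy_without_gil` as the deleter so that when the last reference is dropped from Python code, the GIL is released (if held) before the potentially expensive C++ destructor runs. `BigNativeState` is a made-up, Python-agnostic type used only for illustration.

```cpp
// Sketch only; BigNativeState is hypothetical.
#include <memory>
#include <vector>
#include <torch/csrc/utils/pybind.h>

struct BigNativeState {
  std::vector<float> buffer;  // imagine something expensive to tear down
};

std::shared_ptr<BigNativeState> make_state() {
  // The deleter, not ~BigNativeState(), is what releases the GIL (when held)
  // before deleting the object.
  return std::shared_ptr<BigNativeState>(
      new BigNativeState(), torch::impl::destroy_without_gil<BigNativeState>);
}
```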
|
============================================================================================================================================
SOURCE CODE FILE: pycfunction_helpers.h
LINES: 1
SIZE: 0.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\pycfunction_helpers.h
ENCODING: utf-8
```h
#pragma once
#include <c10/macros/Macros.h>
#include <Python.h>
inline PyCFunction castPyCFunctionWithKeywords(PyCFunctionWithKeywords func) {
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wcast-function-type")
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wcast-function-type-strict")
return reinterpret_cast<PyCFunction>(func);
C10_DIAGNOSTIC_POP()
C10_DIAGNOSTIC_POP()
}
```
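A hedged sketch (the binding below is made up, not a real torch entry point): the helper exists so that a `METH_VARARGS | METH_KEYWORDS` implementation, whose natural type is `PyCFunctionWithKeywords`, can be stored in the `PyCFunction` slot of a `PyMethodDef` without tripping `-Wcast-function-type` warnings.

```cpp
// Sketch only; my_func is a hypothetical binding.
#include <torch/csrc/utils/pycfunction_helpers.h>

static PyObject* my_func(PyObject* self, PyObject* args, PyObject* kwargs) {
  Py_RETURN_NONE;
}

static PyMethodDef my_methods[] = {
    {"my_func",
     castPyCFunctionWithKeywords(my_func),  // safe cast into the ml_meth slot
     METH_VARARGS | METH_KEYWORDS,
     "toy example"},
    {nullptr, nullptr, 0, nullptr}};
```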
|
==============================================================================================================================================
SOURCE CODE FILE: pyobject_preservation.h
LINES: 1
SIZE: 0.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\pyobject_preservation.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
// This file contains utilities used for handling PyObject preservation
void clear_slots(PyTypeObject* type, PyObject* self);
```
|
==========================================================================================================================================
SOURCE CODE FILE: python_arg_parser.h
LINES: 1
SIZE: 41.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_arg_parser.h
ENCODING: utf-8
```h
#pragma once
// Parse arguments to Python functions implemented in C++
// This is similar to PyArg_ParseTupleAndKeywords(), but specifically handles
// the types relevant to PyTorch and distinguishes between overloaded function
// signatures.
//
// Example:
//
// static PythonArgParser parser({
// "norm(Scalar p, int64_t dim, bool keepdim=False)",
// "norm(Scalar p=2)",
// });
// ParsedArgs<3> parsed_args;
// auto r = parser.parse(args, kwargs, parsed_args);
// if (r.idx == 0) {
//     norm(r.scalar(0), r.toInt64(1), r.toBool(2));
// } else {
// norm(r.scalar(0));
// }
//
// We auto-generate most uses of PythonArgParser; the generated files
// are torch/csrc/autograd/generated/python_*.cpp
//
// Some gotchas that you should watch out for:
//
// - Note [Order of overloads matters]
// Order of overloads matters. A set of input arguments may
// bind to multiple argument specs; we will always pick the
// first one in PythonArgParser. However, when you are writing
// overloads in, e.g., native_functions.yaml, you don't have to
// worry about what order you write them, because the code
// generation logic always gives the overloads a canonical
// order, where Tensor overloads come first, before Scalar overloads.
// This logic is in sort_declarations in
// tools/autograd/gen_python_functions.py
//
// - Zero-dim tensors (e.g., torch.tensor(2)) bind to both
// Scalar and Tensor, UNLESS they require grad (in which case
// they only bind to Tensor).
#include <pybind11/pytypes.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/Device.h>
#include <torch/csrc/Dtype.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/Generator.h>
#include <torch/csrc/Layout.h>
#include <torch/csrc/MemoryFormat.h>
#include <torch/csrc/QScheme.h>
#include <torch/csrc/Stream.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/dynamo/eval_frame.h>
#include <torch/csrc/jit/frontend/tracer.h>
#include <torch/csrc/python_dimname.h>
#include <torch/csrc/tensor/python_tensor.h>
#include <torch/csrc/utils/disable_torch_function.h>
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/python_strings.h>
#include <torch/csrc/utils/python_symnode.h>
#include <torch/csrc/utils/six.h>
#include <ATen/DeviceAccelerator.h>
#include <ATen/PythonTorchFunctionTLS.h>
#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>
#include <c10/core/SymFloat.h>
#include <c10/core/SymNodeImpl.h>
#include <c10/core/DispatchKeySet.h>
#include <array>
#include <cstddef>
#include <string>
#include <vector>
inline bool THPUtils_checkScalar(PyObject* obj) {
#ifdef USE_NUMPY
if (torch::utils::is_numpy_scalar(obj)) {
return true;
}
#endif
return PyFloat_Check(obj) || PyLong_Check(obj) || PyComplex_Check(obj) ||
torch::is_symint(py::handle(obj)) ||
torch::is_symfloat(py::handle(obj)) || torch::is_symbool(py::handle(obj));
}
namespace torch {
TORCH_PYTHON_API bool should_allow_numbers_as_tensors(const std::string& name);
enum class ParameterType {
TENSOR,
SCALAR,
INT64,
SYM_INT,
DOUBLE,
COMPLEX,
TENSOR_LIST,
INT_LIST,
GENERATOR,
BOOL,
STORAGE,
PYOBJECT,
SCALARTYPE,
LAYOUT,
MEMORY_FORMAT,
DEVICE,
STREAM,
STRING,
DIMNAME,
DIMNAME_LIST,
QSCHEME,
FLOAT_LIST,
SCALAR_LIST,
SYM_INT_LIST,
DISPATCH_KEY_SET
};
struct FunctionParameter;
struct FunctionSignature;
struct PythonArgs;
// Contains bound Python arguments in declaration order
template <int N>
struct ParsedArgs {
ParsedArgs() : args() {}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
PyObject* args[N];
};
// A PythonArgParser contains a list of valid signatures. Instances are
// typically global variables and should be immutable.
struct PYBIND11_EXPORT PythonArgParser {
explicit PythonArgParser(
const std::vector<std::string>& fmts,
bool traceable = false);
// meant only for `torch` functions.
template <int N>
inline PythonArgs parse(
PyObject* self,
PyObject* args,
PyObject* kwargs,
ParsedArgs<N>& dst);
template <int N>
inline PythonArgs parse(PyObject* args, PyObject* kwargs, ParsedArgs<N>& dst);
inline PythonArgs parse(PyObject* self, ParsedArgs<0>& dst);
// Formatted strings of non-hidden signatures
std::vector<std::string> get_signatures() const;
private:
[[noreturn]] void print_error(
PyObject* self,
PyObject* args,
PyObject* kwargs,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
PyObject* parsed_args[]);
void check_deprecated(const FunctionSignature& signature);
PythonArgs raw_parse(
PyObject* self,
PyObject* args,
PyObject* kwargs,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
PyObject* parsed_args[]);
std::vector<FunctionSignature> signatures_;
std::string function_name;
size_t max_args;
bool traceable;
};
// FunctionSignature represents a single valid signature for a Python function.
// It is immutable once constructed. The contained data can be concurrently
// accessed by multiple calls.
struct FunctionSignature {
explicit FunctionSignature(const std::string& fmt, int index);
bool parse(
PyObject* self,
PyObject* args,
PyObject* kwargs,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
PyObject* dst[],
std::vector<PyObject*>& overloaded_args,
bool raise_exception);
std::string toString() const;
std::string name;
std::vector<FunctionParameter> params;
size_t min_args;
size_t max_args;
size_t max_pos_args;
int index;
bool hidden;
bool deprecated;
};
// PythonArgs contains bound Python arguments for an actual invocation
// along with references to the matched signature.
struct TORCH_PYTHON_API PythonArgs {
PythonArgs(
bool traceable,
const FunctionSignature& signature,
PyObject** args,
std::vector<PyObject*> overloaded_args)
: idx(signature.index),
traceable(traceable),
signature(signature),
args(args),
overloaded_args(std::move(overloaded_args)) {}
int idx;
bool traceable;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const FunctionSignature& signature;
PyObject** args;
std::vector<PyObject*> overloaded_args; // NOTE: borrowed references
inline bool has_torch_function();
inline std::string get_func_name();
inline at::Tensor tensor(int i);
inline std::optional<at::Tensor> optionalTensor(int i);
inline at::Scalar scalar(int i);
inline at::Scalar scalarWithDefault(int i, const at::Scalar& default_scalar);
inline std::vector<at::Scalar> scalarlist(int i);
inline std::vector<at::Tensor> tensorlist(int i);
inline torch::List<std::optional<at::Tensor>> list_of_optional_tensors(int i);
template <int N>
inline std::array<at::Tensor, N> tensorlist_n(int i);
inline std::vector<int64_t> intlist(int i);
inline std::vector<c10::SymInt> symintlist(int i);
inline c10::OptionalArray<int64_t> intlistOptional(int i);
inline c10::OptionalArray<c10::SymInt> symintlistOptional(int i);
inline std::vector<int64_t> intlistWithDefault(
int i,
std::vector<int64_t> default_intlist);
inline std::optional<at::Generator> generator(int i);
inline at::Storage storage(int i);
inline at::Storage storage(
int i,
at::ScalarType& storage_scalar_type,
bool& is_typed_storage);
inline c10::Stream stream(int i);
inline at::ScalarType scalartype(int i);
inline at::ScalarType scalartypeWithDefault(
int i,
at::ScalarType default_scalartype);
inline std::optional<at::ScalarType> scalartypeOptional(int i);
inline std::optional<at::Scalar> scalarOptional(int i);
inline std::optional<int64_t> toInt64Optional(int i);
inline std::optional<c10::SymInt> toSymIntOptional(int i);
inline std::optional<bool> toBoolOptional(int i);
inline std::optional<double> toDoubleOptional(int i);
inline c10::OptionalArray<double> doublelistOptional(int i);
inline std::vector<double> doublelist(int i);
inline std::vector<double> getDoublelist(int i);
inline at::Layout layout(int i);
inline at::Layout layoutWithDefault(int i, at::Layout default_layout);
inline std::optional<at::Layout> layoutOptional(int i);
inline at::Device device(int i);
inline at::Device deviceWithDefault(int i, const at::Device& default_device);
inline std::optional<at::Device> deviceOptional(int i);
inline at::Dimname dimname(int i);
inline std::vector<at::Dimname> dimnamelist(int i);
inline std::optional<std::vector<at::Dimname>> toDimnameListOptional(int i);
inline at::MemoryFormat memoryformat(int i);
inline std::optional<at::MemoryFormat> memoryformatOptional(int i);
inline at::QScheme toQScheme(int i);
inline std::string string(int i);
inline std::string stringWithDefault(int i, const std::string& default_str);
inline std::optional<std::string> stringOptional(int i);
inline std::string_view stringView(int i);
inline std::string_view stringViewWithDefault(
int i,
const std::string_view default_str);
inline std::optional<std::string_view> stringViewOptional(int i);
inline PyObject* pyobject(int i);
inline int64_t toInt64(int i);
inline c10::SymInt toSymInt(int i);
inline c10::SymBool toSymBool(int i);
inline int64_t toInt64WithDefault(int i, int64_t default_int);
inline double toDouble(int i);
inline double toDoubleWithDefault(int i, double default_double);
inline c10::complex<double> toComplex(int i);
inline c10::complex<double> toComplexWithDefault(
int i,
c10::complex<double> default_complex);
inline bool toBool(int i);
inline bool toBoolWithDefault(int i, bool default_bool);
inline bool isNone(int i);
inline std::optional<c10::DispatchKeySet> toDispatchKeySetOptional(int i);
private:
// Non-inline functions' symbols are exposed to torch_python DLL
// via TORCH_PYTHON_API tag at struct level.
at::Tensor tensor_slow(int i);
at::Scalar scalar_slow(int i);
at::Scalar scalar_slow(PyObject* arg);
};
// FunctionParameter is a single formal parameter of a Python function.
// It is immutable once constructed.
struct FunctionParameter {
FunctionParameter(const std::string& fmt, bool keyword_only);
bool check(
PyObject* obj,
std::vector<PyObject*>& overloaded_args,
int argnum,
int64_t* failed_idx = nullptr);
void set_default_str(const std::string& str);
TORCH_PYTHON_API std::string type_name() const;
ParameterType type_;
bool optional;
bool allow_none;
bool keyword_only;
bool allow_numbers_as_tensors = false;
int size;
std::string name;
// having this as a raw PyObject * will presumably leak it, but these are only
  // held by static objects anyway, and Py_Finalize may already have been
  // called by the time this is destructed.
PyObject* python_name;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::SmallVector<PyObject*, 5> numpy_python_names;
at::Scalar default_scalar;
std::vector<int64_t> default_intlist;
std::string default_string;
union {
bool default_bool;
int64_t default_int;
double default_double;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
double default_complex[2]; // see Scalar
at::ScalarType default_scalartype;
at::Layout default_layout;
};
std::string default_value;
};
template <int N>
inline PythonArgs PythonArgParser::parse(
PyObject* self,
PyObject* args,
PyObject* kwargs,
ParsedArgs<N>& dst) {
TORCH_CHECK_VALUE(
N >= max_args,
"PythonArgParser: dst ParsedArgs buffer does not have enough capacity, expected ",
max_args,
" (got ",
N,
")");
return raw_parse(self, args, kwargs, dst.args);
}
template <int N>
inline PythonArgs PythonArgParser::parse(
PyObject* args,
PyObject* kwargs,
ParsedArgs<N>& dst) {
return parse(nullptr, args, kwargs, dst);
}
inline PythonArgs PythonArgParser::parse(PyObject* self, ParsedArgs<0>& dst) {
return parse(self, nullptr, nullptr, dst);
}
inline bool PythonArgs::has_torch_function() {
return !overloaded_args.empty() || at::impl::torch_function_mode_enabled();
}
inline std::string PythonArgs::get_func_name() {
return signature.name;
}
// TODO: this can return MaybeOwned
inline at::Tensor PythonArgs::tensor(int i) {
if (args[i] && THPVariable_CheckExact(args[i])) {
return THPVariable_Unpack(args[i]);
}
return tensor_slow(i);
}
inline std::optional<at::Tensor> PythonArgs::optionalTensor(int i) {
at::Tensor t = tensor(i);
// NOLINTNEXTLINE(bugprone-branch-clone)
if (t.defined()) {
return t;
} else {
return std::nullopt;
}
}
inline at::Scalar PythonArgs::scalar(int i) {
if (!args[i])
return signature.params[i].default_scalar;
return scalar_slow(i);
}
inline std::vector<at::Scalar> PythonArgs::scalarlist(int i) {
if (!args[i])
return std::vector<at::Scalar>();
auto tuple = six::isTuple(args[i]);
THPObjectPtr arg = six::maybeAsTuple(args[i]);
// NOLINTNEXTLINE(bugprone-branch-clone)
auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
std::vector<at::Scalar> res(size);
for (const auto idx : c10::irange(size)) {
PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
: PyList_GET_ITEM(arg.get(), idx);
res[idx] = scalar_slow(obj);
}
return res;
}
inline at::Scalar PythonArgs::scalarWithDefault(
int i,
const at::Scalar& default_scalar) {
if (!args[i])
return default_scalar;
return scalar_slow(i);
}
inline std::optional<at::Scalar> PythonArgs::scalarOptional(int i) {
if (!args[i])
return std::nullopt;
return scalar_slow(i);
}
inline std::vector<at::Tensor> PythonArgs::tensorlist(int i) {
if (!args[i])
return std::vector<at::Tensor>();
auto tuple = six::isTuple(args[i]);
THPObjectPtr arg = six::maybeAsTuple(args[i]);
// NOLINTNEXTLINE(bugprone-branch-clone)
auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
std::vector<at::Tensor> res(size);
for (const auto idx : c10::irange(size)) {
PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
: PyList_GET_ITEM(arg.get(), idx);
// This is checked by the argument parser so it's safe to cast without
// checking if this is a tensor first
res[idx] = THPVariable_Unpack(obj);
}
return res;
}
inline torch::List<std::optional<at::Tensor>> PythonArgs::
list_of_optional_tensors(int i) {
if (!args[i])
return torch::List<std::optional<at::Tensor>>();
auto tuple = six::isTuple(args[i]);
THPObjectPtr arg = six::maybeAsTuple(args[i]);
// NOLINTNEXTLINE(bugprone-branch-clone)
auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
torch::List<std::optional<at::Tensor>> res;
res.reserve(size);
for (const auto idx : c10::irange(size)) {
PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
: PyList_GET_ITEM(arg.get(), idx);
// This is checked by the argument parser so it's safe to cast without
// checking if this is a tensor first
res.push_back(THPVariable_Unpack(obj));
}
return res;
}
template <int N>
inline std::array<at::Tensor, N> PythonArgs::tensorlist_n(int i) {
auto res = std::array<at::Tensor, N>();
if (!args[i])
return res;
auto tuple = six::isTuple(args[i]);
THPObjectPtr arg = six::maybeAsTuple(args[i]);
// NOLINTNEXTLINE(bugprone-branch-clone)
auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
if (size != N) {
throw TypeError("expected tuple of %d elements but got %d", N, (int)size);
}
for (const auto idx : c10::irange(size)) {
PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
: PyList_GET_ITEM(arg.get(), idx);
// This is checked by the argument parser so it's safe to cast without
// checking if this is a tensor first
res[idx] = THPVariable_Unpack(obj);
}
return res;
}
inline std::vector<int64_t> PythonArgs::intlist(int i) {
return intlistWithDefault(i, signature.params[i].default_intlist);
}
inline PyObject* toPyObject(const c10::SymInt& symint) {
if (symint.is_symbolic()) {
auto r = py::cast(symint).release().ptr();
TORCH_INTERNAL_ASSERT(r);
return r;
} else {
auto m = symint.maybe_as_int();
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
return THPUtils_packInt64(m.value());
}
}
inline void throw_intlist_exception(
const torch::PythonArgs* args,
size_t i,
PyObject* obj,
size_t idx,
const std::exception& e = python_error()) {
std::string error = strlen(e.what())
? e.what()
: std::string("type must be ") + args->signature.params[i].type_name() +
",but got " + Py_TYPE(obj)->tp_name;
throw TypeError(
"%s(): argument '%s' failed to unpack the object at pos %zu with error \"%s\"",
args->signature.name.c_str(),
args->signature.params[i].name.c_str(),
idx + 1,
error.c_str());
}
inline std::vector<c10::SymInt> PythonArgs::symintlist(int i) {
if (!args[i]) {
return c10::fmap(signature.params[i].default_intlist, [](int64_t di) {
return c10::SymInt(di);
});
}
const auto size1 = signature.params[i].size;
if (size1 > 0 && THPUtils_checkLong(args[i])) {
return std::vector<c10::SymInt>(
size1, c10::SymInt(THPUtils_unpackLong(args[i])));
}
if (size1 > 0 && torch::is_symint(py::handle(args[i]))) {
auto si = py::handle(args[i]).cast<c10::SymInt>();
return std::vector<c10::SymInt>(size1, si);
}
PyObject* arg = args[i];
auto tuple = PyTuple_Check(arg);
// NOLINTNEXTLINE(bugprone-branch-clone)
const auto size2 = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg);
std::vector<c10::SymInt> res;
res.reserve(size2);
for (const auto idx : c10::irange(size2)) {
PyObject* obj =
tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx);
// Elements of torch.Size are tensors during tracing, and we need to
// record extra information before they are turned into an IntArrayRef
if (traceable && jit::tracer::isTracing() && THPVariable_Check(obj)) {
auto& var = THPVariable_Unpack(obj);
jit::tracer::ArgumentStash::stashIntArrayRefElem(
signature.params[i].name, size2, idx, var);
try {
res.emplace_back(var.item<int64_t>());
continue;
} catch (std::exception& e) {
throw_intlist_exception(this, i, obj, idx, e);
}
continue;
} else {
// convert tensor to scalar outside of try / catch,
// so that Tensor subclass exceptions will not be caught.
if (THPUtils_checkLongExact(obj)) {
// Fast path for plain numbers
try {
res.emplace_back(THPUtils_unpackLong(obj));
} catch (std::exception& e) {
throw_intlist_exception(this, i, obj, idx, e);
}
} else if (THPVariable_Check(obj)) {
auto& var = THPVariable_Unpack(obj);
if (var.numel() != 1 ||
!at::isIntegralType(
var.dtype().toScalarType(), /*include_bool*/ true)) {
throw_intlist_exception(this, i, obj, idx);
}
auto scalar = var.item();
TORCH_CHECK(scalar.isIntegral(/*include bool*/ false));
res.push_back(scalar.toSymInt());
} else {
try {
if (is_symint(py::handle(obj))) {
res.push_back(py::handle(obj).cast<c10::SymInt>());
} else {
res.emplace_back(THPUtils_unpackIndex(obj));
}
} catch (std::exception& e) {
throw_intlist_exception(this, i, obj, idx, e);
}
}
}
}
return res;
}
inline std::vector<int64_t> PythonArgs::intlistWithDefault(
int i,
std::vector<int64_t> default_intlist) {
if (!args[i])
return default_intlist;
PyObject* arg = args[i];
const auto size1 = signature.params[i].size;
if (size1 > 0 && THPUtils_checkLong(arg)) {
return std::vector<int64_t>(size1, THPUtils_unpackLong(arg));
}
if (size1 > 0 && torch::is_symint(py::handle(arg))) {
return std::vector<int64_t>(
size1,
py::handle(arg).cast<c10::SymInt>().guard_int(__FILE__, __LINE__));
}
auto tuple = PyTuple_Check(arg);
// NOLINTNEXTLINE(bugprone-branch-clone)
const auto size2 = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg);
std::vector<int64_t> res(size2);
for (const auto idx : c10::irange(size2)) {
PyObject* obj =
tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx);
// Elements of torch.Size are tensors during tracing, and we need to
// record extra information before they are turned into an IntArrayRef
if (traceable && jit::tracer::isTracing() && THPVariable_Check(obj)) {
auto& var = THPVariable_Unpack(obj);
jit::tracer::ArgumentStash::stashIntArrayRefElem(
signature.params[i].name, size2, idx, var);
try {
res[idx] = var.item<int64_t>();
continue;
} catch (std::exception& e) {
throw_intlist_exception(this, i, obj, idx, e);
}
} else {
// convert tensor to scalar outside of try / catch,
// so that Tensor subclass exceptions will not be caught.
if (THPUtils_checkLongExact(obj)) {
// Fast path for plain numbers
try {
res[idx] = THPUtils_unpackLong(obj);
} catch (std::exception& e) {
throw_intlist_exception(this, i, obj, idx, e);
}
} else if (torch::is_symint(py::handle(obj))) {
res[idx] = py::cast<c10::SymInt>(py::handle(obj))
.guard_int(__FILE__, __LINE__);
} else if (THPVariable_Check(obj)) {
auto& var = THPVariable_Unpack(obj);
if (var.numel() != 1 ||
!at::isIntegralType(
var.dtype().toScalarType(), /*include_bool*/ true)) {
throw_intlist_exception(this, i, obj, idx);
}
res[idx] = var.item<int64_t>();
} else {
try {
res[idx] = THPUtils_unpackIndex(obj);
} catch (std::exception& e) {
throw_intlist_exception(this, i, obj, idx, e);
}
}
}
}
return res;
}
inline c10::OptionalArray<int64_t> PythonArgs::intlistOptional(int i) {
if (!args[i]) {
return {};
}
return intlist(i);
}
inline c10::OptionalArray<c10::SymInt> PythonArgs::symintlistOptional(int i) {
if (!args[i]) {
return {};
}
return symintlist(i);
}
inline std::vector<double> PythonArgs::getDoublelist(int i) {
PyObject* arg = args[i];
auto tuple = PyTuple_Check(arg);
// NOLINTNEXTLINE(bugprone-branch-clone)
auto size = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg);
std::vector<double> res(size);
for (const auto idx : c10::irange(size)) {
PyObject* obj =
tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx);
try {
if (torch::is_symfloat(py::handle(obj))) {
res[idx] = py::cast<c10::SymFloat>(py::handle(obj))
.guard_float(__FILE__, __LINE__);
} else {
res[idx] = THPUtils_unpackDouble(obj);
}
} catch (const std::exception&) {
throw TypeError(
"%s(): argument '%s' must be %s, but found element of type %s at pos %zu",
signature.name.c_str(),
signature.params[i].name.c_str(),
signature.params[i].type_name().c_str(),
Py_TYPE(obj)->tp_name,
idx + 1);
}
}
return res;
}
inline c10::OptionalArray<double> PythonArgs::doublelistOptional(int i) {
if (!args[i]) {
return {};
}
return this->getDoublelist(i);
}
inline std::vector<double> PythonArgs::doublelist(int i) {
if (!args[i]) {
return {};
}
return this->getDoublelist(i);
}
inline std::optional<c10::DispatchKeySet> PythonArgs::toDispatchKeySetOptional(
int i) {
if (!args[i]) {
return {};
}
return py::cast<c10::DispatchKeySet>(py::handle(args[i]));
}
inline at::ScalarType PythonArgs::scalartypeWithDefault(
int i,
at::ScalarType default_scalartype) {
if (!args[i])
return default_scalartype;
return scalartype(i);
}
inline at::ScalarType toScalarType(PyObject* obj) {
if (obj == (PyObject*)&PyFloat_Type) {
return at::ScalarType::Double;
}
if (obj == (PyObject*)&PyBool_Type) {
return at::ScalarType::Bool;
}
if (obj == (PyObject*)&PyLong_Type) {
return at::ScalarType::Long;
}
if (obj == (PyObject*)&PyComplex_Type) {
return at::ScalarType::ComplexDouble;
}
return reinterpret_cast<THPDtype*>(obj)->scalar_type;
}
inline at::ScalarType PythonArgs::scalartype(int i) {
if (!args[i]) {
auto scalartype = signature.params[i].default_scalartype;
return (scalartype == at::ScalarType::Undefined)
? torch::tensors::get_default_scalar_type()
: scalartype;
}
PyObject* obj = args[i];
return toScalarType(obj);
}
inline std::optional<at::ScalarType> PythonArgs::scalartypeOptional(int i) {
if (!args[i])
return std::nullopt;
return scalartype(i);
}
inline at::Layout toLayout(PyObject* obj) {
const auto layout = reinterpret_cast<THPLayout*>(obj);
return layout->layout;
}
inline at::Layout PythonArgs::layout(int i) {
if (!args[i])
return signature.params[i].default_layout;
return toLayout(args[i]);
}
inline at::Layout PythonArgs::layoutWithDefault(
int i,
at::Layout default_layout) {
if (!args[i])
return default_layout;
return layout(i);
}
inline std::optional<at::Layout> PythonArgs::layoutOptional(int i) {
if (!args[i])
return std::nullopt;
return layout(i);
}
inline at::Device deviceFromLong(int64_t device_index) {
TORCH_CHECK(device_index >= 0, "Device index must not be negative");
return at::Device(
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
at::getAccelerator(true).value(),
static_cast<c10::DeviceIndex>(device_index));
}
inline at::Device toDevice(PyObject* obj) {
if (THPDevice_Check(obj)) {
const auto device = reinterpret_cast<THPDevice*>(obj);
return device->device;
}
if (THPUtils_checkLong(obj)) {
return deviceFromLong(THPUtils_unpackLong(obj));
}
if (torch::is_symint(py::handle(obj))) {
auto device_index =
py::cast<c10::SymInt>(py::handle(obj)).guard_int(__FILE__, __LINE__);
return deviceFromLong(device_index);
}
const std::string& device_str = THPUtils_unpackString(obj);
return at::Device(device_str);
}
inline at::Device PythonArgs::device(int i) {
if (!args[i]) {
return torch::tensors::get_default_device();
}
return toDevice(args[i]);
}
inline at::Device PythonArgs::deviceWithDefault(
int i,
const at::Device& default_device) {
if (!args[i])
return default_device;
return device(i);
}
inline std::optional<at::Device> PythonArgs::deviceOptional(int i) {
if (!args[i])
return std::nullopt;
return device(i);
}
inline at::Dimname PythonArgs::dimname(int i) {
TORCH_INTERNAL_ASSERT(args[i] != nullptr);
return THPDimname_parse(args[i]);
}
inline std::vector<at::Dimname> parseDimnameList(PyObject* arg) {
auto tuple = PyTuple_Check(arg);
// NOLINTNEXTLINE(bugprone-branch-clone)
auto size = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg);
std::vector<at::Dimname> res;
res.reserve(size);
for (const auto idx : c10::irange(size)) {
PyObject* obj =
tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx);
res.push_back(THPDimname_parse(obj));
}
return res;
}
inline std::optional<std::vector<at::Dimname>> PythonArgs::
toDimnameListOptional(int i) {
if (!args[i])
return std::nullopt;
return parseDimnameList(args[i]);
}
inline std::vector<at::Dimname> PythonArgs::dimnamelist(int i) {
TORCH_INTERNAL_ASSERT(args[i]);
PyObject* arg = args[i];
auto size = signature.params[i].size;
TORCH_INTERNAL_ASSERT(size == 0 || size == 1);
if (size == 1 && THPUtils_checkDimname(arg)) {
return {THPDimname_parse(arg)};
}
return parseDimnameList(arg);
}
inline at::MemoryFormat PythonArgs::memoryformat(int i) {
if (!args[i])
return at::MemoryFormat::Contiguous;
TORCH_CHECK(
THPMemoryFormat_Check(args[i]),
"memory_format arg must be an instance of the torch.memory_format");
const auto memory_format = reinterpret_cast<THPMemoryFormat*>(args[i]);
return memory_format->memory_format;
}
inline std::optional<at::MemoryFormat> PythonArgs::memoryformatOptional(int i) {
if (!args[i])
return std::nullopt;
return memoryformat(i);
}
inline at::QScheme PythonArgs::toQScheme(int i) {
if (!args[i])
return at::kPerTensorAffine;
TORCH_CHECK(
THPQScheme_Check(args[i]),
"qscheme arg must be an instance of the torch.qscheme");
const auto qscheme = reinterpret_cast<THPQScheme*>(args[i]);
return qscheme->qscheme;
}
inline std::string PythonArgs::string(int i) {
return stringWithDefault(i, signature.params[i].default_string);
}
inline std::string PythonArgs::stringWithDefault(
int i,
const std::string& default_str) {
if (!args[i])
return default_str;
return THPUtils_unpackString(args[i]);
}
inline std::optional<std::string> PythonArgs::stringOptional(int i) {
if (!args[i])
return std::nullopt;
return THPUtils_unpackString(args[i]);
}
inline std::string_view PythonArgs::stringView(int i) {
return stringViewWithDefault(i, signature.params[i].default_string);
}
inline std::string_view PythonArgs::stringViewWithDefault(
int i,
const std::string_view default_str) {
if (!args[i])
return default_str;
return THPUtils_unpackStringView(args[i]);
}
inline std::optional<std::string_view> PythonArgs::stringViewOptional(int i) {
if (!args[i])
return std::nullopt;
return THPUtils_unpackStringView(args[i]);
}
inline int64_t PythonArgs::toInt64(int i) {
if (!args[i])
return signature.params[i].default_int;
if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) {
auto& var = THPVariable_Unpack(args[i]);
jit::tracer::ArgumentStash::stashValue(
signature.params[i].name, idx, var, c10::IntType::get());
}
if (torch::is_symint(py::handle(args[i]))) {
return py::cast<c10::SymInt>(py::handle(args[i]))
.guard_int(__FILE__, __LINE__);
}
return THPUtils_unpackLong(args[i]);
}
inline c10::SymInt PythonArgs::toSymInt(int i) {
if (!args[i]) {
return c10::SymInt(signature.params[i].default_int);
}
if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) {
auto& var = THPVariable_Unpack(args[i]);
jit::tracer::ArgumentStash::stashValue(
signature.params[i].name, idx, var, c10::IntType::get());
}
return py::cast<c10::SymInt>(py::handle(args[i]));
}
inline c10::SymBool PythonArgs::toSymBool(int i) {
if (!args[i]) {
return c10::SymBool(signature.params[i].default_bool);
}
if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) {
auto& var = THPVariable_Unpack(args[i]);
jit::tracer::ArgumentStash::stashValue(
signature.params[i].name, idx, var, c10::BoolType::get());
}
return py::cast<c10::SymBool>(py::handle(args[i]));
}
inline int64_t PythonArgs::toInt64WithDefault(int i, int64_t default_int) {
if (!args[i])
return default_int;
return toInt64(i);
}
inline std::optional<int64_t> PythonArgs::toInt64Optional(int i) {
if (!args[i])
return std::nullopt;
return toInt64(i);
}
inline std::optional<c10::SymInt> PythonArgs::toSymIntOptional(int i) {
if (!args[i])
return std::nullopt;
return toSymInt(i);
}
inline std::optional<bool> PythonArgs::toBoolOptional(int i) {
if (!args[i]) {
return std::nullopt;
}
return toBool(i);
}
inline std::optional<double> PythonArgs::toDoubleOptional(int i) {
if (!args[i]) {
return std::nullopt;
}
return toDouble(i);
}
inline double PythonArgs::toDouble(int i) {
if (!args[i])
return signature.params[i].default_double;
if (torch::is_symfloat(py::handle(args[i]))) {
return py::cast<c10::SymFloat>(py::handle(args[i]))
.guard_float(__FILE__, __LINE__);
}
if (torch::is_symint(py::handle(args[i]))) {
return static_cast<double>(py::cast<c10::SymInt>(py::handle(args[i]))
.guard_int(__FILE__, __LINE__));
}
return THPUtils_unpackDouble(args[i]);
}
inline bool PythonArgs::toBool(int i) {
if (!args[i])
return signature.params[i].default_bool;
if (torch::is_symbool(py::handle(args[i]))) {
return py::cast<c10::SymBool>(py::handle(args[i]))
.guard_bool(__FILE__, __LINE__);
}
return args[i] == Py_True;
}
inline double PythonArgs::toDoubleWithDefault(int i, double default_double) {
if (!args[i])
return default_double;
return toDouble(i);
}
inline c10::complex<double> PythonArgs::toComplex(int i) {
if (!args[i])
return *(reinterpret_cast<const c10::complex<double>*>(
signature.params[i].default_complex));
return THPUtils_unpackComplexDouble(args[i]);
}
inline c10::complex<double> PythonArgs::toComplexWithDefault(
int i,
c10::complex<double> default_complex) {
if (!args[i])
return default_complex;
return toComplex(i);
}
inline bool PythonArgs::toBoolWithDefault(int i, bool default_bool) {
if (!args[i])
return default_bool;
return toBool(i);
}
inline bool PythonArgs::isNone(int i) {
return args[i] == nullptr;
}
inline std::optional<at::Generator> PythonArgs::generator(int i) {
if (!args[i])
return std::nullopt;
return reinterpret_cast<THPGenerator*>(args[i])->cdata;
}
inline at::Storage PythonArgs::storage(int i) {
if (!args[i])
return at::Storage();
return createStorage(args[i]);
}
inline at::Storage PythonArgs::storage(
int i,
at::ScalarType& storage_scalar_type,
bool& is_typed_storage) {
at::Storage storage;
if (!args[i]) {
storage = at::Storage();
is_typed_storage = false;
storage_scalar_type = at::ScalarType::Undefined;
} else {
std::tie(storage, storage_scalar_type, is_typed_storage) =
createStorageGetType(args[i]);
}
return storage;
}
inline c10::Stream PythonArgs::stream(int i) {
if (!args[i])
return c10::Stream(
c10::Stream::Default::DEFAULT, c10::Device(c10::DeviceType::CPU, -1));
if (!THPStream_Check(args[i])) {
throw TypeError(
"expected Stream object. Got '%s'", Py_TYPE(args[i])->tp_name);
}
return c10::Stream::unpack3(
((THPStream*)args[i])->stream_id,
static_cast<c10::DeviceIndex>(((THPStream*)args[i])->device_index),
static_cast<c10::DeviceType>(((THPStream*)args[i])->device_type));
}
inline PyObject* PythonArgs::pyobject(int i) {
if (!args[i])
return Py_None;
return args[i];
}
/*
*
* Handle __torch_function__ overrides if we know that there are overloaded
* arguments. All objects stored in r.overloaded_args must have a
* __torch_function__ implementation and the arguments must be ordered in order
* of precedence. Precedence goes from left to right in the order of the
* signature of the function the overloaded arguments were passed to, except
* subclasses are always considered before superclasses.
*
* If the result of calling __torch_function__ is NotImplemented, the
* next implementation in the precedence order is called. If all
* arguments return NotImplemented from their __torch_function__
* implementation, a TypeError is raised in Python.
*
* Assumes overloaded_args has at least one entry. All entries must have
* a __torch_function__ attribute that resolves to a callable that
* accepts a torch API function, a tuple of arguments, and a dict of
* keyword arguments for the torch API function.
*
* It is sufficient to call PythonArgs::has_torch_function before
* calling this function to verify that there are valid arguments
* present. If that is not done then special care must be taken to
* ensure there are arguments that are overloaded with
* __torch_function__.
*
* See torch._overrides.handle_torch_function for the equivalent
* code in the pure-python implementation.
*
* 'r' is a parsed PythonArgs instance, returned from
* PythonArgParser::parse.
*
* 'args' is a reference to the python tuple of arguments to the torch
* API function.
*
* 'kwargs' is a reference to the python dict of keyword arguments to
* the torch API function.
*
* 'torch_api' is a reference to a python torch API namespace.
*
* 'torch_api_function' is a reference to the original torch method. Usually
* we can use torch_api and func_name to get torch_api_function, but in some
* cases, e.g. custom ops created in C++, fetching the original API via
* torch_api and func_name would result in a cyclic call, so the function is
* passed explicitly.
*
* 'overloaded_args' are the args which have an overloaded __torch_function__.
*
* 'func_name' is the name of the original torch method.
*
* TODO: we could use different names for the following 'handle_torch_function'
* instead of overloading.
*
*/
// Used for Tensor methods with arguments.
auto handle_torch_function(
PythonArgs& r,
PyObject* self,
PyObject* args,
PyObject* kwargs,
PyObject* torch_api,
const char* module_name,
const char* func_name_override = nullptr) -> PyObject*;
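// Illustrative call pattern (a sketch; `parser`, `parsed_args` and the
// surrounding binding are hypothetical): a Tensor-method binding gives
// __torch_function__ overloads the first chance to handle the call and only
// then falls through to the C++ implementation:
//
//   auto r = parser.parse(self, args, kwargs, parsed_args);
//   if (r.has_torch_function()) {
//     return handle_torch_function(
//         r, self, args, kwargs, THPVariableClass, "torch.Tensor");
//   }
//   // ... otherwise dispatch to the underlying ATen implementation ...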
// Used for functions which need to parse python args.
auto handle_torch_function(
PythonArgs& r,
PyObject* args,
PyObject* kwargs,
PyObject* torch_api,
const char* module_name,
const char* func_name_override = nullptr) -> PyObject*;
// Used for functions that have no argument parsing.
auto handle_torch_function(
PyObject* self,
const std::string& func_name,
PyObject* args = nullptr,
PyObject* kwargs = nullptr,
PyObject* torch_api = THPVariableClass,
const std::string& module_name = "torch.Tensor") -> PyObject*;
// Used for functions created in C++, e.g. C++ custom ops, which don't use
// PythonArgParser to get overloaded_args.
enum class TorchFunctionName { TorchFunction, TorchDispatch };
auto TORCH_PYTHON_API handle_torch_function_no_python_arg_parser(
at::ArrayRef<PyObject*> overloaded_args,
PyObject* args,
PyObject* kwargs,
const char* func_name,
PyObject* torch_api_function,
const char* module_name,
TorchFunctionName torch_function_name = TorchFunctionName::TorchFunction)
-> PyObject*;
// Used for getters of Tensor properties
auto handle_torch_function_getter(
THPVariable* self,
const std::string& property_name) -> PyObject*;
// Used for setters of Tensor properties.
auto handle_torch_function_setter(
THPVariable* self,
const std::string& property_name,
PyObject* value) -> int;
// Used for __getitem__ and __setitem__
auto handle_torch_function_indexing(
PyObject* self,
PyObject* index,
PyObject* val = nullptr) -> PyObject*;
/*
* Check if the input obj is a Tensor type (including its subclasses) or an
* overloaded type. If the type defines __torch_function__, it also returns
* true; otherwise it returns false. If the class is not torch.Tensor and it
* defines __torch_function__, obj is appended to overloaded_args.
*
* 'obj': the input argument to be checked
* 'overloaded_args': the vector to append the overloaded args.
*/
bool is_tensor_and_append_overloaded(
PyObject* obj,
std::vector<PyObject*>* overloaded_args);
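// Illustrative usage sketch (hypothetical caller): while validating an
// input, collect any __torch_function__ overloads so they can be dispatched
// later:
//
//   std::vector<PyObject*> overloaded_args;
//   if (!is_tensor_and_append_overloaded(obj, &overloaded_args)) {
//     // obj is neither a Tensor nor a __torch_function__ overload
//   }
//   if (!overloaded_args.empty()) {
//     // defer to handle_torch_function_no_python_arg_parser(...)
//   }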
/*
* Check if the input obj is a Tensor List or Tensor Tuple type. First check
* whether obj is a Tuple or List; if so, iterate over each element and check
* whether it is a Tensor type, including its subclasses or overloaded types.
* Along the way, any overloaded arg is appended to overloaded_args.
*
* 'obj': the input argument to be checked
* 'overloaded_args': the vector to append the overloaded args.
* 'argnum': the total number of arguments of the function being checked.
* 'throw_error': whether to throw an error if any element in the list or
* tuple is not a tensor type or overloaded.
*/
bool is_tensor_list_and_append_overloaded(
PyObject* obj,
std::vector<PyObject*>* overloaded_args,
size_t argnum,
bool throw_error);
/* Given an argument that is definitely a tensor and is definitely overloaded,
* append it to the overloaded arguments list. Use this instead of
* is_tensor_and_append_overloaded in situations where you have a PyObject
* and you know it definitely is a Tensor and it is definitely overloaded.
*
* 'overloaded_args': the vector to append the overloaded args
* 'obj': the input tensor that is overloaded
*/
void append_overloaded_tensor(
std::vector<PyObject*>* overloaded_args,
PyObject* obj);
/* Given an argument that is definitely a type and is definitely overloaded,
* append it to the overloaded arguments list. Use this only with
* __torch_dispatch__, where we operate on classes that have a
* __torch_dispatch__ classmethod.
*
* 'overloaded_args': the vector to append the overloaded type
* 'obj': the input class that has a __torch_dispatch__ classmethod.
*/
void append_overloaded_type(
std::vector<PyObject*>* overloaded_args,
PyObject* obj);
} // namespace torch
```
|
======================================================================================================================================
SOURCE CODE FILE: python_compat.h
LINES: 1
SIZE: 1.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_compat.h
ENCODING: utf-8
```h
#ifndef PYTHON_COMPAT
#define PYTHON_COMPAT
#include <torch/csrc/utils/pythoncapi_compat.h>
#ifdef __cplusplus
extern "C" {
#endif
// PyTorch-only compat functions
#define IS_PYTHON_3_11_PLUS PY_VERSION_HEX >= 0x030B00C1
#define IS_PYTHON_3_12_PLUS PY_VERSION_HEX >= 0x030C0000
#define IS_PYTHON_3_13_PLUS PY_VERSION_HEX >= 0x030D0000
#define IS_PYTHON_3_14_PLUS PY_VERSION_HEX >= 0x030E0000
static inline int PyCode_GetNCellvars(PyCodeObject* code) {
// gh-26364 added co_ncellvars to Python 3.11.0rc1
#if IS_PYTHON_3_11_PLUS
return code->co_ncellvars;
#else
return PyTuple_GET_SIZE(code->co_cellvars);
#endif
}
static inline int PyCode_GetNFreevars(PyCodeObject* code) {
// gh-26364 added co_nfreevars to Python 3.11.0rc1
#if IS_PYTHON_3_11_PLUS
return code->co_nfreevars;
#else
return PyTuple_GET_SIZE(code->co_freevars);
#endif
}
// Provided by CPython but getting the header for them is very hard
#if IS_PYTHON_3_11_PLUS
// NOLINTNEXTLINE(readability-redundant-declaration)
PyAPI_FUNC(void) _PyWeakref_ClearRef(PyWeakReference* self);
#else
extern void _PyWeakref_ClearRef(PyWeakReference* self);
#endif
#ifdef __cplusplus
}
#endif
#endif // PYTHON_COMPAT
```
|
========================================================================================================================================
SOURCE CODE FILE: python_dispatch.h
LINES: 1
SIZE: 0.40 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_dispatch.h
ENCODING: utf-8
```h
#include <pybind11/pybind11.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::impl::dispatch {
void initDispatchBindings(PyObject* module);
void python_op_registration_trampoline_impl(
const c10::OperatorHandle& op,
c10::DispatchKey key,
c10::DispatchKeySet keyset,
torch::jit::Stack* stack,
bool with_keyset,
bool with_op);
} // namespace torch::impl::dispatch
```
|
=======================================================================================================================================
SOURCE CODE FILE: python_numbers.h
LINES: 1
SIZE: 5.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_numbers.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/Device.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/jit/frontend/tracer.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/tensor_numpy.h>
#include <cstdint>
#include <limits>
#include <stdexcept>
// largest integer that can be represented consecutively in a double (2^53)
const int64_t DOUBLE_INT_MAX = 9007199254740992;
inline PyObject* THPUtils_packDeviceIndex(c10::DeviceIndex value) {
return PyLong_FromLong(value);
}
inline PyObject* THPUtils_packInt32(int32_t value) {
return PyLong_FromLong(value);
}
inline PyObject* THPUtils_packInt64(int64_t value) {
return PyLong_FromLongLong(value);
}
inline PyObject* THPUtils_packUInt32(uint32_t value) {
return PyLong_FromUnsignedLong(value);
}
inline PyObject* THPUtils_packUInt64(uint64_t value) {
return PyLong_FromUnsignedLongLong(value);
}
inline PyObject* THPUtils_packDoubleAsInt(double value) {
return PyLong_FromDouble(value);
}
inline bool THPUtils_checkLongExact(PyObject* obj) {
return PyLong_CheckExact(obj) && !PyBool_Check(obj);
}
inline bool THPUtils_checkLong(PyObject* obj) {
// Fast path
if (THPUtils_checkLongExact(obj)) {
return true;
}
#ifdef USE_NUMPY
if (torch::utils::is_numpy_int(obj)) {
return true;
}
#endif
return PyLong_Check(obj) && !PyBool_Check(obj);
}
inline int32_t THPUtils_unpackInt(PyObject* obj) {
int overflow = 0;
long value = PyLong_AsLongAndOverflow(obj, &overflow);
if (value == -1 && PyErr_Occurred()) {
throw python_error();
}
if (overflow != 0) {
throw std::runtime_error("Overflow when unpacking long");
}
if (value > std::numeric_limits<int32_t>::max() ||
value < std::numeric_limits<int32_t>::min()) {
throw std::runtime_error("Overflow when unpacking long");
}
return (int32_t)value;
}
inline int64_t THPUtils_unpackLong(PyObject* obj) {
int overflow = 0;
long long value = PyLong_AsLongLongAndOverflow(obj, &overflow);
if (value == -1 && PyErr_Occurred()) {
throw python_error();
}
if (overflow != 0) {
throw std::runtime_error("Overflow when unpacking long");
}
return (int64_t)value;
}
inline uint32_t THPUtils_unpackUInt32(PyObject* obj) {
unsigned long value = PyLong_AsUnsignedLong(obj);
if (PyErr_Occurred()) {
throw python_error();
}
if (value > std::numeric_limits<uint32_t>::max()) {
throw std::runtime_error("Overflow when unpacking unsigned long");
}
return (uint32_t)value;
}
inline uint64_t THPUtils_unpackUInt64(PyObject* obj) {
unsigned long long value = PyLong_AsUnsignedLongLong(obj);
if (PyErr_Occurred()) {
throw python_error();
}
return (uint64_t)value;
}
bool THPUtils_checkIndex(PyObject* obj);
inline int64_t THPUtils_unpackIndex(PyObject* obj) {
if (!THPUtils_checkLong(obj)) {
auto index = THPObjectPtr(PyNumber_Index(obj));
if (index == nullptr) {
throw python_error();
}
// NB: This needs to be called before `index` goes out of scope and the
// underlying object's refcount is decremented
return THPUtils_unpackLong(index.get());
}
return THPUtils_unpackLong(obj);
}
inline bool THPUtils_unpackBool(PyObject* obj) {
if (obj == Py_True) {
return true;
} else if (obj == Py_False) {
return false;
} else {
throw std::runtime_error("couldn't convert python object to boolean");
}
}
inline bool THPUtils_checkBool(PyObject* obj) {
#ifdef USE_NUMPY
if (torch::utils::is_numpy_bool(obj)) {
return true;
}
#endif
return PyBool_Check(obj);
}
inline bool THPUtils_checkDouble(PyObject* obj) {
#ifdef USE_NUMPY
if (torch::utils::is_numpy_scalar(obj)) {
return true;
}
#endif
return PyFloat_Check(obj) || PyLong_Check(obj);
}
inline double THPUtils_unpackDouble(PyObject* obj) {
if (PyFloat_Check(obj)) {
return PyFloat_AS_DOUBLE(obj);
}
double value = PyFloat_AsDouble(obj);
if (value == -1 && PyErr_Occurred()) {
throw python_error();
}
return value;
}
inline c10::complex<double> THPUtils_unpackComplexDouble(PyObject* obj) {
Py_complex value = PyComplex_AsCComplex(obj);
if (value.real == -1.0 && PyErr_Occurred()) {
throw python_error();
}
return c10::complex<double>(value.real, value.imag);
}
inline bool THPUtils_unpackNumberAsBool(PyObject* obj) {
if (PyFloat_Check(obj)) {
return (bool)PyFloat_AS_DOUBLE(obj);
}
if (PyComplex_Check(obj)) {
double real_val = PyComplex_RealAsDouble(obj);
double imag_val = PyComplex_ImagAsDouble(obj);
return !(real_val == 0 && imag_val == 0);
}
int overflow = 0;
long long value = PyLong_AsLongLongAndOverflow(obj, &overflow);
if (value == -1 && PyErr_Occurred()) {
throw python_error();
}
// No need to check overflow, because when overflow occurred, it should
// return true in order to keep the same behavior as numpy.
return (bool)value;
}
inline c10::DeviceIndex THPUtils_unpackDeviceIndex(PyObject* obj) {
int overflow = 0;
long value = PyLong_AsLongAndOverflow(obj, &overflow);
if (value == -1 && PyErr_Occurred()) {
throw python_error();
}
if (overflow != 0) {
throw std::runtime_error("Overflow when unpacking DeviceIndex");
}
if (value > std::numeric_limits<c10::DeviceIndex>::max() ||
value < std::numeric_limits<c10::DeviceIndex>::min()) {
throw std::runtime_error("Overflow when unpacking DeviceIndex");
}
return (c10::DeviceIndex)value;
}
```
|
====================================================================================================================================
SOURCE CODE FILE: python_raii.h
LINES: 1
SIZE: 2.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_raii.h
ENCODING: utf-8
```h
#include <torch/csrc/utils/pybind.h>
#include <optional>
#include <tuple>
namespace torch::impl {
template <typename GuardT, typename... Args>
struct RAIIContextManager {
explicit RAIIContextManager(Args&&... args)
: args_(std::forward<Args>(args)...) {}
void enter() {
auto emplace = [&](Args... args) {
guard_.emplace(std::forward<Args>(args)...);
};
std::apply(std::move(emplace), args_);
}
void exit() {
guard_ = std::nullopt;
}
private:
std::optional<GuardT> guard_;
std::tuple<Args...> args_;
};
// Turns a C++ RAII guard into a Python context manager.
// See _ExcludeDispatchKeyGuard in python_dispatch.cpp for example.
template <typename GuardT, typename... GuardArgs>
void py_context_manager(const py::module& m, const char* name) {
using ContextManagerT = RAIIContextManager<GuardT, GuardArgs...>;
py::class_<ContextManagerT>(m, name)
.def(py::init<GuardArgs...>())
.def("__enter__", [](ContextManagerT& guard) { guard.enter(); })
.def(
"__exit__",
[](ContextManagerT& guard,
const py::object& exc_type,
const py::object& exc_value,
const py::object& traceback) { guard.exit(); });
}
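// Illustrative usage sketch (assumes a guard such as
// c10::impl::ExcludeDispatchKeyGuard that is constructible from a
// c10::DispatchKeySet); inside a pybind11 module init one could write:
//
//   torch::impl::py_context_manager<
//       c10::impl::ExcludeDispatchKeyGuard,
//       c10::DispatchKeySet>(m, "_ExcludeDispatchKeyGuard");
//
// This exposes a Python context manager that constructs the guard on
// __enter__ and destroys it on __exit__.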
template <typename GuardT, typename... Args>
struct DeprecatedRAIIContextManager {
explicit DeprecatedRAIIContextManager(Args&&... args) {
guard_.emplace(std::forward<Args>(args)...);
}
void enter() {}
void exit() {
guard_ = std::nullopt;
}
private:
std::optional<GuardT> guard_;
std::tuple<Args...> args_;
};
// Definition: a "Python RAII guard" is an object in Python that acquires
// a resource on init and releases the resource on deletion.
//
// This API turns a C++ RAII guard into an object that can be used either as a
// Python context manager or as a "Python RAII guard".
//
// Please prefer `py_context_manager` to this API if you are binding a new
// RAII guard into Python because "Python RAII guards" don't work as expected
// in Python (Python makes no guarantees about when an object gets deleted)
template <typename GuardT, typename... GuardArgs>
void py_context_manager_DEPRECATED(const py::module& m, const char* name) {
using ContextManagerT = DeprecatedRAIIContextManager<GuardT, GuardArgs...>;
py::class_<ContextManagerT>(m, name)
.def(py::init<GuardArgs...>())
.def("__enter__", [](ContextManagerT& guard) { guard.enter(); })
.def(
"__exit__",
[](ContextManagerT& guard,
const py::object& exc_type,
const py::object& exc_value,
const py::object& traceback) { guard.exit(); });
}
} // namespace torch::impl
```
|
=======================================================================================================================================
SOURCE CODE FILE: python_scalars.h
LINES: 1
SIZE: 6.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_scalars.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <c10/util/TypeCast.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/python_numbers.h>
namespace torch::utils {
template <typename T>
inline T unpackIntegral(PyObject* obj, const char* type) {
#if PY_VERSION_HEX >= 0x030a00f0
// In Python-3.10 floats can no longer be silently converted to integers
// Keep backward compatible behavior for now
if (PyFloat_Check(obj)) {
return c10::checked_convert<T>(THPUtils_unpackDouble(obj), type);
}
return c10::checked_convert<T>(THPUtils_unpackLong(obj), type);
#else
return static_cast<T>(THPUtils_unpackLong(obj));
#endif
}
inline void store_scalar(void* data, at::ScalarType scalarType, PyObject* obj) {
switch (scalarType) {
case at::kByte:
*(uint8_t*)data = unpackIntegral<uint8_t>(obj, "uint8");
break;
case at::kUInt16:
*(uint16_t*)data = unpackIntegral<uint16_t>(obj, "uint16");
break;
case at::kUInt32:
*(uint32_t*)data = unpackIntegral<uint32_t>(obj, "uint32");
break;
case at::kUInt64:
// NB: This doesn't allow implicit conversion of float to int
*(uint64_t*)data = THPUtils_unpackUInt64(obj);
break;
case at::kChar:
*(int8_t*)data = unpackIntegral<int8_t>(obj, "int8");
break;
case at::kShort:
*(int16_t*)data = unpackIntegral<int16_t>(obj, "int16");
break;
case at::kInt:
*(int32_t*)data = unpackIntegral<int32_t>(obj, "int32");
break;
case at::kLong:
*(int64_t*)data = unpackIntegral<int64_t>(obj, "int64");
break;
case at::kHalf:
*(at::Half*)data =
at::convert<at::Half, double>(THPUtils_unpackDouble(obj));
break;
case at::kFloat:
*(float*)data = (float)THPUtils_unpackDouble(obj);
break;
case at::kDouble:
*(double*)data = THPUtils_unpackDouble(obj);
break;
case at::kComplexHalf:
*(c10::complex<at::Half>*)data =
(c10::complex<at::Half>)static_cast<c10::complex<float>>(
THPUtils_unpackComplexDouble(obj));
break;
case at::kComplexFloat:
*(c10::complex<float>*)data =
(c10::complex<float>)THPUtils_unpackComplexDouble(obj);
break;
case at::kComplexDouble:
*(c10::complex<double>*)data = THPUtils_unpackComplexDouble(obj);
break;
case at::kBool:
*(bool*)data = THPUtils_unpackNumberAsBool(obj);
break;
case at::kBFloat16:
*(at::BFloat16*)data =
at::convert<at::BFloat16, double>(THPUtils_unpackDouble(obj));
break;
// TODO(#146647): simplify below with macros
case at::kFloat8_e5m2:
*(at::Float8_e5m2*)data =
at::convert<at::Float8_e5m2, double>(THPUtils_unpackDouble(obj));
break;
case at::kFloat8_e5m2fnuz:
*(at::Float8_e5m2fnuz*)data =
at::convert<at::Float8_e5m2fnuz, double>(THPUtils_unpackDouble(obj));
break;
case at::kFloat8_e4m3fn:
*(at::Float8_e4m3fn*)data =
at::convert<at::Float8_e4m3fn, double>(THPUtils_unpackDouble(obj));
break;
case at::kFloat8_e4m3fnuz:
*(at::Float8_e4m3fnuz*)data =
at::convert<at::Float8_e4m3fnuz, double>(THPUtils_unpackDouble(obj));
break;
case at::kFloat8_e8m0fnu:
*(at::Float8_e8m0fnu*)data =
at::convert<at::Float8_e8m0fnu, double>(THPUtils_unpackDouble(obj));
break;
default:
throw std::runtime_error("store_scalar: invalid type");
}
}
inline PyObject* load_scalar(const void* data, at::ScalarType scalarType) {
switch (scalarType) {
case at::kByte:
return THPUtils_packInt64(*(uint8_t*)data);
case at::kUInt16:
return THPUtils_packInt64(*(uint16_t*)data);
case at::kUInt32:
return THPUtils_packUInt32(*(uint32_t*)data);
case at::kUInt64:
return THPUtils_packUInt64(*(uint64_t*)data);
case at::kChar:
return THPUtils_packInt64(*(int8_t*)data);
case at::kShort:
return THPUtils_packInt64(*(int16_t*)data);
case at::kInt:
return THPUtils_packInt64(*(int32_t*)data);
case at::kLong:
return THPUtils_packInt64(*(int64_t*)data);
case at::kHalf:
return PyFloat_FromDouble(
at::convert<double, at::Half>(*(at::Half*)data));
case at::kFloat:
return PyFloat_FromDouble(*(float*)data);
case at::kDouble:
return PyFloat_FromDouble(*(double*)data);
case at::kComplexHalf: {
auto data_ = reinterpret_cast<const c10::complex<at::Half>*>(data);
return PyComplex_FromDoubles(data_->real(), data_->imag());
}
case at::kComplexFloat: {
auto data_ = reinterpret_cast<const c10::complex<float>*>(data);
return PyComplex_FromDoubles(data_->real(), data_->imag());
}
case at::kComplexDouble:
return PyComplex_FromCComplex(
*reinterpret_cast<Py_complex*>((c10::complex<double>*)data));
case at::kBool:
// Don't use bool*, since it may take out-of-range byte as bool.
// Instead, we cast explicitly to avoid ASAN error.
return PyBool_FromLong(static_cast<bool>(*(uint8_t*)data));
case at::kBFloat16:
return PyFloat_FromDouble(
at::convert<double, at::BFloat16>(*(at::BFloat16*)data));
// TODO(#146647): simplify below with macros
case at::kFloat8_e5m2:
return PyFloat_FromDouble(
at::convert<double, at::Float8_e5m2>(*(at::Float8_e5m2*)data));
case at::kFloat8_e4m3fn:
return PyFloat_FromDouble(
at::convert<double, at::Float8_e4m3fn>(*(at::Float8_e4m3fn*)data));
case at::kFloat8_e5m2fnuz:
return PyFloat_FromDouble(at::convert<double, at::Float8_e5m2fnuz>(
*(at::Float8_e5m2fnuz*)data));
case at::kFloat8_e4m3fnuz:
return PyFloat_FromDouble(at::convert<double, at::Float8_e4m3fnuz>(
*(at::Float8_e4m3fnuz*)data));
case at::kFloat8_e8m0fnu:
return PyFloat_FromDouble(
at::convert<double, at::Float8_e8m0fnu>(*(at::Float8_e8m0fnu*)data));
default:
throw std::runtime_error("load_scalar: invalid type");
}
}
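// Illustrative usage sketch (hypothetical caller): store_scalar/load_scalar
// round-trip a Python number through raw storage of a given dtype, e.g.
// writing a Python float into a kFloat element and reading it back:
//
//   float buf = 0.f;
//   store_scalar(&buf, at::kFloat, py_number);      // unpack + convert
//   PyObject* out = load_scalar(&buf, at::kFloat);  // returns a new reference
//
// The pointer must refer to memory of the element type implied by the
// ScalarType, and the caller owns the returned reference.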
} // namespace torch::utils
```
|
=======================================================================================================================================
SOURCE CODE FILE: python_strings.h
LINES: 1
SIZE: 4.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_strings.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/pybind.h>
#include <stdexcept>
#include <string>
// Utilities for handling Python strings. Note that PyString, when defined, is
// the same as PyBytes.
// Returns true if obj is a bytes/str or unicode object
// As of Python 3.6, this does not require the GIL
inline bool THPUtils_checkString(PyObject* obj) {
return PyBytes_Check(obj) || PyUnicode_Check(obj);
}
// Unpacks PyBytes (PyString) or PyUnicode as std::string
// PyBytes are unpacked as-is. PyUnicode is unpacked as UTF-8.
// NOTE: this method requires the GIL
inline std::string THPUtils_unpackString(PyObject* obj) {
if (PyBytes_Check(obj)) {
size_t size = PyBytes_GET_SIZE(obj);
return std::string(PyBytes_AS_STRING(obj), size);
}
if (PyUnicode_Check(obj)) {
Py_ssize_t size = 0;
const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
if (!data) {
throw std::runtime_error("error unpacking string as utf-8");
}
return std::string(data, (size_t)size);
}
throw std::runtime_error("unpackString: expected bytes or unicode object");
}
// Unpacks PyBytes (PyString) or PyUnicode as std::string_view
// PyBytes are unpacked as-is. PyUnicode is unpacked as UTF-8.
// NOTE: If `obj` is destroyed, then the non-owning std::string_view will
// become invalid. If the string needs to be accessed at any point after
// `obj` is destroyed, then the std::string_view should be copied into
// a std::string, or another owning object, and kept alive. For an example,
// look at how IValue and autograd nodes handle std::string_view arguments.
// NOTE: this method requires the GIL
inline std::string_view THPUtils_unpackStringView(PyObject* obj) {
if (PyBytes_Check(obj)) {
size_t size = PyBytes_GET_SIZE(obj);
return std::string_view(PyBytes_AS_STRING(obj), size);
}
if (PyUnicode_Check(obj)) {
Py_ssize_t size = 0;
const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
if (!data) {
throw std::runtime_error("error unpacking string as utf-8");
}
return std::string_view(data, (size_t)size);
}
throw std::runtime_error("unpackString: expected bytes or unicode object");
}
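// Illustrative lifetime sketch (hypothetical caller): the returned view is
// only valid while `obj` is alive, so copy it if it must outlive the Python
// object:
//
//   std::string_view v = THPUtils_unpackStringView(obj);
//   consume(v);              // OK: obj is still alive here
//   std::string owned(v);    // copy before obj can be decref'd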
inline PyObject* THPUtils_packString(const char* str) {
return PyUnicode_FromString(str);
}
inline PyObject* THPUtils_packString(const std::string& str) {
return PyUnicode_FromStringAndSize(
str.c_str(), static_cast<Py_ssize_t>(str.size()));
}
inline PyObject* THPUtils_internString(const std::string& str) {
return PyUnicode_InternFromString(str.c_str());
}
// Precondition: THPUtils_checkString(obj) must be true
inline bool THPUtils_isInterned(PyObject* obj) {
return PyUnicode_CHECK_INTERNED(obj);
}
// Precondition: THPUtils_checkString(obj) must be true
inline void THPUtils_internStringInPlace(PyObject** obj) {
PyUnicode_InternInPlace(obj);
}
/*
* Reference:
* https://github.com/numpy/numpy/blob/f4c497c768e0646df740b647782df463825bfd27/numpy/core/src/common/get_attr_string.h#L42
*
* Stripped down version of PyObject_GetAttrString,
* avoids lookups for None, tuple, and List objects,
* and doesn't create a PyErr since this code ignores it.
*
* This can be much faster than PyObject_GetAttrString when
* exceptions are not used by the caller.
*
* 'obj' is the object to search for attribute.
*
* 'name' is the attribute to search for.
*
* Returns a py::object wrapping the return value. If the attribute lookup
* failed the value will be NULL.
*
*/
inline py::object PyObject_FastGetAttrString(PyObject* obj, const char* name) {
PyTypeObject* tp = Py_TYPE(obj);
PyObject* res = (PyObject*)nullptr;
/* Attribute referenced by (char *)name */
if (tp->tp_getattr != nullptr) {
// This is OK per https://bugs.python.org/issue39620
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
res = (*tp->tp_getattr)(obj, const_cast<char*>(name));
if (res == nullptr) {
PyErr_Clear();
}
}
/* Attribute referenced by (PyObject *)name */
else if (tp->tp_getattro != nullptr) {
auto w = py::reinterpret_steal<py::object>(THPUtils_internString(name));
if (w.ptr() == nullptr) {
return py::object();
}
res = (*tp->tp_getattro)(obj, w.ptr());
if (res == nullptr) {
PyErr_Clear();
}
}
return py::reinterpret_steal<py::object>(res);
}
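// Illustrative usage sketch (hypothetical caller): probe for an optional
// attribute without raising on absence:
//
//   py::object fn = PyObject_FastGetAttrString(obj, "__torch_function__");
//   if (fn.ptr() != nullptr) {
//     // the attribute exists; it can now be called via fn(...)
//   }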
```
|
====================================================================================================================================
SOURCE CODE FILE: python_stub.h
LINES: 1
SIZE: 0.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_stub.h
ENCODING: utf-8
```h
#pragma once
struct _object;
using PyObject = _object;
```
|
=======================================================================================================================================
SOURCE CODE FILE: python_symnode.h
LINES: 1
SIZE: 9.69 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_symnode.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/SafePyObject.h>
#include <c10/core/SymNodeImpl.h>
#include <torch/csrc/PyInterpreter.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/utils/pybind.h>
namespace torch {
TORCH_PYTHON_API py::handle get_symint_class();
TORCH_PYTHON_API py::handle get_symfloat_class();
TORCH_PYTHON_API py::handle get_symbool_class();
// NB: These functions must not be called too early, otherwise torch is not yet set up.
// Alternate design is to have torch "register" the object to us
inline bool is_symint(py::handle obj) {
return py::isinstance(obj, get_symint_class());
}
inline bool is_symfloat(py::handle obj) {
return py::isinstance(obj, get_symfloat_class());
}
inline bool is_symbool(py::handle obj) {
return py::isinstance(obj, get_symbool_class());
}
namespace impl {
// This c10::SymNodeImpl is simply backed by a Python object that
// implements the API. The Python object is the source of truth;
// this is just an adapter so C++ calls can get to the object.
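// Illustrative sketch (assumes c10::SymInt is constructible from a
// c10::SymNode and that `py_symnode` is a py::object implementing the
// SymNode protocol): wrapping lets C++ treat the Python node as a SymInt:
//
//   auto node = c10::make_intrusive<PythonSymNodeImpl>(py_symnode);
//   c10::SymInt si(c10::SymNode(std::move(node)));
//   // arithmetic on `si` now dispatches back into the Python object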
class PythonSymNodeImpl : public c10::SymNodeImpl {
public:
PythonSymNodeImpl(py::object pyobj) : c10::SymNodeImpl() {
pyobj_ = std::make_shared<c10::SafePyObject>(
pyobj.release().ptr(), getPyInterpreter());
}
c10::SymNode wrap_int(int64_t num) override {
py::gil_scoped_acquire acquire;
auto r = getPyObj().attr("wrap_int")(num);
return c10::make_intrusive<PythonSymNodeImpl>(std::move(r));
}
c10::SymNode wrap_float(double num) override {
py::gil_scoped_acquire acquire;
auto r = getPyObj().attr("wrap_float")(num);
return c10::make_intrusive<PythonSymNodeImpl>(std::move(r));
}
c10::SymNode wrap_bool(bool num) override {
py::gil_scoped_acquire acquire;
auto r = getPyObj().attr("wrap_bool")(num);
return c10::make_intrusive<PythonSymNodeImpl>(std::move(r));
}
#define TORCH_SYMNODE_SIZES_STRIDES(n) \
c10::SymNode n( \
c10::ArrayRef<c10::SymNode> sizes, c10::ArrayRef<c10::SymNode> strides) \
override { \
py::gil_scoped_acquire acquire; \
auto r = getPyObj().attr(#n)(sizes, strides); \
return c10::make_intrusive<PythonSymNodeImpl>(std::move(r)); \
}
// clang-format off
TORCH_SYMNODE_SIZES_STRIDES(is_contiguous)
TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_contiguous_2d)
TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_contiguous_3d)
TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_strides_2d)
TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_strides_3d)
TORCH_SYMNODE_SIZES_STRIDES(is_non_overlapping_and_dense)
// clang-format on
#undef TORCH_SYMNODE_SIZES_STRIDES
bool bool_() override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("bool_")().is(py::handle(Py_True));
}
bool is_int() override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("is_int")().is(py::handle(Py_True));
}
bool is_float() override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("is_float")().is(py::handle(Py_True));
}
bool is_bool() override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("is_bool")().is(py::handle(Py_True));
}
bool is_nested_int() const override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("is_nested_int")().is(py::handle(Py_True));
}
bool has_hint() override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("has_hint")().is(py::handle(Py_True));
}
int64_t guard_int(const char* file, int64_t line) override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("guard_int")(file, line).cast<int64_t>();
}
double guard_float(const char* file, int64_t line) override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("guard_float")(file, line).cast<double>();
}
bool guard_bool(const char* file, int64_t line) override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("guard_bool")(file, line).cast<bool>();
}
bool expect_true(const char* file, int64_t line) override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("expect_true")(file, line).cast<bool>();
}
bool expect_size(const char* file, int64_t line) override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("expect_size")(file, line).cast<bool>();
}
bool guard_size_oblivious(const char* file, int64_t line) override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("guard_size_oblivious")(file, line).cast<bool>();
}
int64_t int_() override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("int_")().cast<int64_t>();
}
std::optional<int64_t> maybe_as_int() override {
py::gil_scoped_acquire acquire;
const auto& r = getPyObj().attr("maybe_as_int")();
if (r.is_none()) {
return std::nullopt;
} else {
return r.cast<int64_t>();
}
}
std::string str() override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("str")().cast<std::string>();
}
std::string _graph_repr() override {
py::gil_scoped_acquire acquire;
return getPyObj().attr("_graph_repr")().cast<std::string>();
}
c10::SymNode dispatch_sym_ite_(
const char* fname,
const c10::SymNode& other,
const c10::SymNode& third) {
auto pother = dynamic_cast<PythonSymNodeImpl*>(other.get());
auto pthird = dynamic_cast<PythonSymNodeImpl*>(third.get());
TORCH_CHECK(pother);
TORCH_CHECK(pthird);
py::gil_scoped_acquire acquire;
auto r = getPyObj().attr(fname)(pother->getPyObj(), pthird->getPyObj());
return c10::make_intrusive<PythonSymNodeImpl>(r);
}
c10::SymNode dispatch_common_(const char* fname, const c10::SymNode& other) {
auto pother = dynamic_cast<PythonSymNodeImpl*>(other.get());
TORCH_CHECK(pother);
py::gil_scoped_acquire acquire;
auto r = getPyObj().attr(fname)(pother->getPyObj());
return c10::make_intrusive<PythonSymNodeImpl>(r);
}
c10::SymNode dispatch_common_(const char* fname) {
py::gil_scoped_acquire acquire;
auto r = getPyObj().attr(fname)();
return c10::make_intrusive<PythonSymNodeImpl>(r);
}
c10::SymNode add(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode sub(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode mul(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode truediv(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode float_truediv(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode int_truediv(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode pow(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode float_pow(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode pow_by_natural(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode floordiv(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode int_floordiv(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode mod(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode eq(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode ne(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode gt(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode lt(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode le(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode ge(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode sym_min(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode sym_max(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode sym_and(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode sym_or(const c10::SymNode& other) override {
return dispatch_common_(__func__, other);
}
c10::SymNode sym_ite(const c10::SymNode& other, const c10::SymNode& third)
override {
return dispatch_sym_ite_(__func__, other, third);
}
c10::SymNode sym_not() override {
return dispatch_common_(__func__);
}
c10::SymNode ceil() override {
return dispatch_common_(__func__);
}
c10::SymNode floor() override {
return dispatch_common_(__func__);
}
c10::SymNode neg() override {
return dispatch_common_(__func__);
}
c10::SymNode clone() override {
return dispatch_common_(__func__);
}
c10::SymNode sym_float() override {
return dispatch_common_(__func__);
}
py::handle getPyObj() const {
return py::handle(pyobj_->ptr(getPyInterpreter()));
}
std::shared_ptr<c10::SafePyObject> pyobj_ = nullptr;
};
} // namespace impl
} // namespace torch
```
|
===================================================================================================================================================
SOURCE CODE FILE: python_torch_function_mode.h
LINES: 1
SIZE: 0.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_torch_function_mode.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/PythonTorchFunctionTLS.h>
namespace torch::overrides {
struct StashTorchFunctionModeGuard {
StashTorchFunctionModeGuard() {
cur_mode_ = at::impl::PythonTorchFunctionTLS::pop_stack();
}
~StashTorchFunctionModeGuard() {
at::impl::PythonTorchFunctionTLS::push_onto_stack(cur_mode_);
}
StashTorchFunctionModeGuard(const StashTorchFunctionModeGuard&) = delete;
StashTorchFunctionModeGuard(StashTorchFunctionModeGuard&&) = delete;
StashTorchFunctionModeGuard& operator=(const StashTorchFunctionModeGuard&) =
delete;
StashTorchFunctionModeGuard& operator=(StashTorchFunctionModeGuard&&) =
delete;
const std::shared_ptr<c10::SafePyObject>& get_cur_mode() {
return cur_mode_;
}
private:
std::shared_ptr<c10::SafePyObject> cur_mode_;
};
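// Illustrative usage sketch (hypothetical caller): temporarily hide the
// innermost torch-function mode while re-entering user code:
//
//   {
//     StashTorchFunctionModeGuard guard;    // pops the current mode
//     // run code that must not re-trigger the stashed mode; the mode itself
//     // remains reachable via guard.get_cur_mode()
//   }                                       // the mode is pushed back here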
} // namespace torch::overrides
```
|
======================================================================================================================================
SOURCE CODE FILE: python_tuples.h
LINES: 1
SIZE: 0.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\python_tuples.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/python_numbers.h>
inline void THPUtils_packInt64Array(
PyObject* tuple,
size_t size,
const int64_t* sizes) {
for (size_t i = 0; i != size; ++i) {
PyObject* i64 = THPUtils_packInt64(sizes[i]);
if (!i64) {
throw python_error();
}
PyTuple_SET_ITEM(tuple, i, i64);
}
}
inline PyObject* THPUtils_packInt64Array(size_t size, const int64_t* sizes) {
THPObjectPtr tuple(PyTuple_New(static_cast<Py_ssize_t>(size)));
if (!tuple)
throw python_error();
THPUtils_packInt64Array(tuple.get(), size, sizes);
return tuple.release();
}
```
|
==========================================================================================================================================
SOURCE CODE FILE: pythoncapi_compat.h
LINES: 1
SIZE: 41.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\pythoncapi_compat.h
ENCODING: utf-8
```h
// Header file providing new C API functions to old Python versions.
//
// File distributed under the Zero Clause BSD (0BSD) license.
// Copyright Contributors to the pythoncapi_compat project.
//
// Homepage:
// https://github.com/python/pythoncapi_compat
//
// Latest version:
// https://raw.githubusercontent.com/python/pythoncapi_compat/master/pythoncapi_compat.h
//
// SPDX-License-Identifier: 0BSD
#ifndef PYTHONCAPI_COMPAT
#define PYTHONCAPI_COMPAT
#ifdef __cplusplus
extern "C" {
#endif
#include <Python.h>
// Python 3.11.0b4 added PyFrame_Back() to Python.h
#if PY_VERSION_HEX < 0x030b00B4 && !defined(PYPY_VERSION)
# include "frameobject.h" // PyFrameObject, PyFrame_GetBack()
#endif
#ifndef _Py_CAST
# define _Py_CAST(type, expr) ((type)(expr))
#endif
// Static inline functions should use _Py_NULL rather than using NULL directly
// to prevent C++ compiler warnings. On C23 and newer and on C++11 and newer,
// _Py_NULL is defined as nullptr.
#if (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L) \
|| (defined(__cplusplus) && __cplusplus >= 201103)
# define _Py_NULL nullptr
#else
# define _Py_NULL NULL
#endif
// Cast argument to PyObject* type.
#ifndef _PyObject_CAST
# define _PyObject_CAST(op) _Py_CAST(PyObject*, op)
#endif
// bpo-42262 added Py_NewRef() to Python 3.10.0a3
#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_NewRef)
static inline PyObject* _Py_NewRef(PyObject *obj)
{
Py_INCREF(obj);
return obj;
}
#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
#endif
// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_XNewRef)
static inline PyObject* _Py_XNewRef(PyObject *obj)
{
Py_XINCREF(obj);
return obj;
}
#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
#endif
// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4
#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT)
static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt)
{
ob->ob_refcnt = refcnt;
}
#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT(_PyObject_CAST(ob), refcnt)
#endif
// Py_SETREF() and Py_XSETREF() were added to Python 3.5.2.
// It is excluded from the limited C API.
#if (PY_VERSION_HEX < 0x03050200 && !defined(Py_SETREF)) && !defined(Py_LIMITED_API)
#define Py_SETREF(dst, src) \
do { \
PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
PyObject *_tmp_dst = (*_tmp_dst_ptr); \
*_tmp_dst_ptr = _PyObject_CAST(src); \
Py_DECREF(_tmp_dst); \
} while (0)
#define Py_XSETREF(dst, src) \
do { \
PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
PyObject *_tmp_dst = (*_tmp_dst_ptr); \
*_tmp_dst_ptr = _PyObject_CAST(src); \
Py_XDECREF(_tmp_dst); \
} while (0)
#endif
// bpo-43753 added Py_Is(), Py_IsNone(), Py_IsTrue() and Py_IsFalse()
// to Python 3.10.0b1.
#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_Is)
# define Py_Is(x, y) ((x) == (y))
#endif
#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsNone)
# define Py_IsNone(x) Py_Is(x, Py_None)
#endif
#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsTrue)
# define Py_IsTrue(x) Py_Is(x, Py_True)
#endif
#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsFalse)
# define Py_IsFalse(x) Py_Is(x, Py_False)
#endif
// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4
#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
static inline void _Py_SET_TYPE(PyObject *ob, PyTypeObject *type)
{
ob->ob_type = type;
}
#define Py_SET_TYPE(ob, type) _Py_SET_TYPE(_PyObject_CAST(ob), type)
#endif
// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4
#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE)
static inline void _Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size)
{
ob->ob_size = size;
}
#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size)
#endif
// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1
#if PY_VERSION_HEX < 0x030900B1 || defined(PYPY_VERSION)
static inline PyCodeObject* PyFrame_GetCode(PyFrameObject *frame)
{
assert(frame != _Py_NULL);
assert(frame->f_code != _Py_NULL);
return _Py_CAST(PyCodeObject*, Py_NewRef(frame->f_code));
}
#endif
static inline PyCodeObject* _PyFrame_GetCodeBorrow(PyFrameObject *frame)
{
PyCodeObject *code = PyFrame_GetCode(frame);
Py_DECREF(code);
return code;
}
// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1
#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION)
static inline PyFrameObject* PyFrame_GetBack(PyFrameObject *frame)
{
assert(frame != _Py_NULL);
return _Py_CAST(PyFrameObject*, Py_XNewRef(frame->f_back));
}
#endif
#if !defined(PYPY_VERSION)
static inline PyFrameObject* _PyFrame_GetBackBorrow(PyFrameObject *frame)
{
PyFrameObject *back = PyFrame_GetBack(frame);
Py_XDECREF(back);
return back;
}
#endif
// bpo-40421 added PyFrame_GetLocals() to Python 3.11.0a7
#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION)
static inline PyObject* PyFrame_GetLocals(PyFrameObject *frame)
{
#if PY_VERSION_HEX >= 0x030400B1
if (PyFrame_FastToLocalsWithError(frame) < 0) {
return NULL;
}
#else
PyFrame_FastToLocals(frame);
#endif
return Py_NewRef(frame->f_locals);
}
#endif
// bpo-40421 added PyFrame_GetGlobals() to Python 3.11.0a7
#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION)
static inline PyObject* PyFrame_GetGlobals(PyFrameObject *frame)
{
return Py_NewRef(frame->f_globals);
}
#endif
// bpo-40421 added PyFrame_GetBuiltins() to Python 3.11.0a7
#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION)
static inline PyObject* PyFrame_GetBuiltins(PyFrameObject *frame)
{
return Py_NewRef(frame->f_builtins);
}
#endif
// bpo-40421 added PyFrame_GetLasti() to Python 3.11.0b1
#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION)
static inline int PyFrame_GetLasti(PyFrameObject *frame)
{
#if PY_VERSION_HEX >= 0x030A00A7
// bpo-27129: Since Python 3.10.0a7, f_lasti is an instruction offset,
// not a bytes offset anymore. Python uses 16-bit "wordcode" (2 bytes)
// instructions.
if (frame->f_lasti < 0) {
return -1;
}
return frame->f_lasti * 2;
#else
return frame->f_lasti;
#endif
}
#endif
// gh-91248 added PyFrame_GetVar() to Python 3.12.0a2
#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION)
static inline PyObject* PyFrame_GetVar(PyFrameObject *frame, PyObject *name)
{
PyObject *locals, *value;
locals = PyFrame_GetLocals(frame);
if (locals == NULL) {
return NULL;
}
#if PY_VERSION_HEX >= 0x03000000
value = PyDict_GetItemWithError(locals, name);
#else
value = _PyDict_GetItemWithError(locals, name);
#endif
Py_DECREF(locals);
if (value == NULL) {
if (PyErr_Occurred()) {
return NULL;
}
#if PY_VERSION_HEX >= 0x03000000
PyErr_Format(PyExc_NameError, "variable %R does not exist", name);
#else
PyErr_SetString(PyExc_NameError, "variable does not exist");
#endif
return NULL;
}
return Py_NewRef(value);
}
#endif
// gh-91248 added PyFrame_GetVarString() to Python 3.12.0a2
#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION)
static inline PyObject*
PyFrame_GetVarString(PyFrameObject *frame, const char *name)
{
PyObject *name_obj, *value;
#if PY_VERSION_HEX >= 0x03000000
name_obj = PyUnicode_FromString(name);
#else
name_obj = PyString_FromString(name);
#endif
if (name_obj == NULL) {
return NULL;
}
value = PyFrame_GetVar(frame, name_obj);
Py_DECREF(name_obj);
return value;
}
#endif
// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5
#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION)
static inline PyInterpreterState *
PyThreadState_GetInterpreter(PyThreadState *tstate)
{
assert(tstate != _Py_NULL);
return tstate->interp;
}
#endif
// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1
#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION)
static inline PyFrameObject* PyThreadState_GetFrame(PyThreadState *tstate)
{
assert(tstate != _Py_NULL);
return _Py_CAST(PyFrameObject *, Py_XNewRef(tstate->frame));
}
#endif
#if !defined(PYPY_VERSION)
static inline PyFrameObject*
_PyThreadState_GetFrameBorrow(PyThreadState *tstate)
{
PyFrameObject *frame = PyThreadState_GetFrame(tstate);
Py_XDECREF(frame);
return frame;
}
#endif
// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5
#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION)
static inline PyInterpreterState* PyInterpreterState_Get(void)
{
PyThreadState *tstate;
PyInterpreterState *interp;
tstate = PyThreadState_GET();
if (tstate == _Py_NULL) {
Py_FatalError("GIL released (tstate is NULL)");
}
interp = tstate->interp;
if (interp == _Py_NULL) {
Py_FatalError("no current interpreter");
}
return interp;
}
#endif
// bpo-39947 added PyThreadState_GetID() to Python 3.9.0a6
#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION)
static inline uint64_t PyThreadState_GetID(PyThreadState *tstate)
{
assert(tstate != _Py_NULL);
return tstate->id;
}
#endif
// bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2
#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION)
static inline void PyThreadState_EnterTracing(PyThreadState *tstate)
{
tstate->tracing++;
#if PY_VERSION_HEX >= 0x030A00A1
tstate->cframe->use_tracing = 0;
#else
tstate->use_tracing = 0;
#endif
}
#endif
// bpo-43760 added PyThreadState_LeaveTracing() to Python 3.11.0a2
#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION)
static inline void PyThreadState_LeaveTracing(PyThreadState *tstate)
{
int use_tracing = (tstate->c_tracefunc != _Py_NULL
|| tstate->c_profilefunc != _Py_NULL);
tstate->tracing--;
#if PY_VERSION_HEX >= 0x030A00A1
tstate->cframe->use_tracing = use_tracing;
#else
tstate->use_tracing = use_tracing;
#endif
}
#endif
// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1
// PyObject_CallNoArgs() added to PyPy 3.9.16-v7.3.11
#if !defined(PyObject_CallNoArgs) && PY_VERSION_HEX < 0x030900A1
static inline PyObject* PyObject_CallNoArgs(PyObject *func)
{
return PyObject_CallFunctionObjArgs(func, NULL);
}
#endif
// bpo-39245 made PyObject_CallOneArg() public (previously called
// _PyObject_CallOneArg) in Python 3.9.0a4
// PyObject_CallOneArg() added to PyPy 3.9.16-v7.3.11
#if !defined(PyObject_CallOneArg) && PY_VERSION_HEX < 0x030900A4
static inline PyObject* PyObject_CallOneArg(PyObject *func, PyObject *arg)
{
return PyObject_CallFunctionObjArgs(func, arg, NULL);
}
#endif
// bpo-1635741 added PyModule_AddObjectRef() to Python 3.10.0a3
#if PY_VERSION_HEX < 0x030A00A3
static inline int
PyModule_AddObjectRef(PyObject *module, const char *name, PyObject *value)
{
int res;
if (!value && !PyErr_Occurred()) {
// PyModule_AddObject() raises TypeError in this case
PyErr_SetString(PyExc_SystemError,
"PyModule_AddObjectRef() must be called "
"with an exception raised if value is NULL");
return -1;
}
Py_XINCREF(value);
res = PyModule_AddObject(module, name, value);
if (res < 0) {
Py_XDECREF(value);
}
return res;
}
#endif
// bpo-40024 added PyModule_AddType() to Python 3.9.0a5
#if PY_VERSION_HEX < 0x030900A5
static inline int PyModule_AddType(PyObject *module, PyTypeObject *type)
{
const char *name, *dot;
if (PyType_Ready(type) < 0) {
return -1;
}
// inline _PyType_Name()
name = type->tp_name;
assert(name != _Py_NULL);
dot = strrchr(name, '.');
if (dot != _Py_NULL) {
name = dot + 1;
}
return PyModule_AddObjectRef(module, name, _PyObject_CAST(type));
}
#endif
// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6.
// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2.
#if PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION)
static inline int PyObject_GC_IsTracked(PyObject* obj)
{
return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj));
}
#endif
// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6.
// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final.
#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0 && !defined(PYPY_VERSION)
static inline int PyObject_GC_IsFinalized(PyObject *obj)
{
PyGC_Head *gc = _Py_CAST(PyGC_Head*, obj) - 1;
return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(gc));
}
#endif
// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4
#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE)
static inline int _Py_IS_TYPE(PyObject *ob, PyTypeObject *type) {
return Py_TYPE(ob) == type;
}
#define Py_IS_TYPE(ob, type) _Py_IS_TYPE(_PyObject_CAST(ob), type)
#endif
// bpo-46906 added PyFloat_Pack2() and PyFloat_Unpack2() to Python 3.11a7.
// bpo-11734 added _PyFloat_Pack2() and _PyFloat_Unpack2() to Python 3.6.0b1.
// Python 3.11a2 moved _PyFloat_Pack2() and _PyFloat_Unpack2() to the internal
// C API: Python 3.11a2-3.11a6 versions are not supported.
#if 0x030600B1 <= PY_VERSION_HEX && PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION)
static inline int PyFloat_Pack2(double x, char *p, int le)
{ return _PyFloat_Pack2(x, (unsigned char*)p, le); }
static inline double PyFloat_Unpack2(const char *p, int le)
{ return _PyFloat_Unpack2((const unsigned char *)p, le); }
#endif
// bpo-46906 added PyFloat_Pack4(), PyFloat_Pack8(), PyFloat_Unpack4() and
// PyFloat_Unpack8() to Python 3.11a7.
// Python 3.11a2 moved _PyFloat_Pack4(), _PyFloat_Pack8(), _PyFloat_Unpack4()
// and _PyFloat_Unpack8() to the internal C API: Python 3.11a2-3.11a6 versions
// are not supported.
#if PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION)
static inline int PyFloat_Pack4(double x, char *p, int le)
{ return _PyFloat_Pack4(x, (unsigned char*)p, le); }
static inline int PyFloat_Pack8(double x, char *p, int le)
{ return _PyFloat_Pack8(x, (unsigned char*)p, le); }
static inline double PyFloat_Unpack4(const char *p, int le)
{ return _PyFloat_Unpack4((const unsigned char *)p, le); }
static inline double PyFloat_Unpack8(const char *p, int le)
{ return _PyFloat_Unpack8((const unsigned char *)p, le); }
#endif
// gh-92154 added PyCode_GetCode() to Python 3.11.0b1
#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION)
static inline PyObject* PyCode_GetCode(PyCodeObject *code)
{
return Py_NewRef(code->co_code);
}
#endif
// gh-95008 added PyCode_GetVarnames() to Python 3.11.0rc1
#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION)
static inline PyObject* PyCode_GetVarnames(PyCodeObject *code)
{
return Py_NewRef(code->co_varnames);
}
#endif
// gh-95008 added PyCode_GetFreevars() to Python 3.11.0rc1
#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION)
static inline PyObject* PyCode_GetFreevars(PyCodeObject *code)
{
return Py_NewRef(code->co_freevars);
}
#endif
// gh-95008 added PyCode_GetCellvars() to Python 3.11.0rc1
#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION)
static inline PyObject* PyCode_GetCellvars(PyCodeObject *code)
{
return Py_NewRef(code->co_cellvars);
}
#endif
// Py_UNUSED() was added to Python 3.4.0b2.
#if PY_VERSION_HEX < 0x030400B2 && !defined(Py_UNUSED)
# if defined(__GNUC__) || defined(__clang__)
# define Py_UNUSED(name) _unused_ ## name __attribute__((unused))
# else
# define Py_UNUSED(name) _unused_ ## name
# endif
#endif
// gh-105922 added PyImport_AddModuleRef() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A0
static inline PyObject* PyImport_AddModuleRef(const char *name)
{
return Py_XNewRef(PyImport_AddModule(name));
}
#endif
// gh-105927 added PyWeakref_GetRef() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D0000
static inline int PyWeakref_GetRef(PyObject *ref, PyObject **pobj)
{
PyObject *obj;
if (ref != NULL && !PyWeakref_Check(ref)) {
*pobj = NULL;
PyErr_SetString(PyExc_TypeError, "expected a weakref");
return -1;
}
obj = PyWeakref_GetObject(ref);
if (obj == NULL) {
// SystemError if ref is NULL
*pobj = NULL;
return -1;
}
if (obj == Py_None) {
*pobj = NULL;
return 0;
}
*pobj = Py_NewRef(obj);
return (*pobj != NULL);
}
#endif
// bpo-36974 added PY_VECTORCALL_ARGUMENTS_OFFSET to Python 3.8b1
#ifndef PY_VECTORCALL_ARGUMENTS_OFFSET
# define PY_VECTORCALL_ARGUMENTS_OFFSET (_Py_CAST(size_t, 1) << (8 * sizeof(size_t) - 1))
#endif
// bpo-36974 added PyVectorcall_NARGS() to Python 3.8b1
#if PY_VERSION_HEX < 0x030800B1
static inline Py_ssize_t PyVectorcall_NARGS(size_t n)
{
return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET;
}
#endif
// PyObject_Vectorcall() was added to Python 3.9.0a4
#if PY_VERSION_HEX < 0x030900A4
static inline PyObject*
PyObject_Vectorcall(PyObject *callable, PyObject *const *args,
size_t nargsf, PyObject *kwnames)
{
#if PY_VERSION_HEX >= 0x030800B1 && !defined(PYPY_VERSION)
// bpo-36974 added _PyObject_Vectorcall() to Python 3.8.0b1
return _PyObject_Vectorcall(callable, args, nargsf, kwnames);
#else
PyObject *posargs = NULL, *kwargs = NULL;
PyObject *res;
Py_ssize_t nposargs, nkwargs, i;
if (nargsf != 0 && args == NULL) {
PyErr_BadInternalCall();
goto error;
}
if (kwnames != NULL && !PyTuple_Check(kwnames)) {
PyErr_BadInternalCall();
goto error;
}
nposargs = (Py_ssize_t)PyVectorcall_NARGS(nargsf);
if (kwnames) {
nkwargs = PyTuple_GET_SIZE(kwnames);
}
else {
nkwargs = 0;
}
posargs = PyTuple_New(nposargs);
if (posargs == NULL) {
goto error;
}
if (nposargs) {
for (i=0; i < nposargs; i++) {
PyTuple_SET_ITEM(posargs, i, Py_NewRef(*args));
args++;
}
}
if (nkwargs) {
kwargs = PyDict_New();
if (kwargs == NULL) {
goto error;
}
for (i = 0; i < nkwargs; i++) {
PyObject *key = PyTuple_GET_ITEM(kwnames, i);
PyObject *value = *args;
args++;
if (PyDict_SetItem(kwargs, key, value) < 0) {
goto error;
}
}
}
else {
kwargs = NULL;
}
res = PyObject_Call(callable, posargs, kwargs);
Py_DECREF(posargs);
Py_XDECREF(kwargs);
return res;
error:
Py_DECREF(posargs);
Py_XDECREF(kwargs);
return NULL;
#endif
}
#endif
// gh-106521 added PyObject_GetOptionalAttr() and
// PyObject_GetOptionalAttrString() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int
PyObject_GetOptionalAttr(PyObject *obj, PyObject *attr_name, PyObject **result)
{
// bpo-32571 added _PyObject_LookupAttr() to Python 3.7.0b1
#if PY_VERSION_HEX >= 0x030700B1 && !defined(PYPY_VERSION)
return _PyObject_LookupAttr(obj, attr_name, result);
#else
*result = PyObject_GetAttr(obj, attr_name);
if (*result != NULL) {
return 1;
}
if (!PyErr_Occurred()) {
return 0;
}
if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
return 0;
}
return -1;
#endif
}
static inline int
PyObject_GetOptionalAttrString(PyObject *obj, const char *attr_name, PyObject **result)
{
PyObject *name_obj;
int rc;
#if PY_VERSION_HEX >= 0x03000000
name_obj = PyUnicode_FromString(attr_name);
#else
name_obj = PyString_FromString(attr_name);
#endif
if (name_obj == NULL) {
*result = NULL;
return -1;
}
rc = PyObject_GetOptionalAttr(obj, name_obj, result);
Py_DECREF(name_obj);
return rc;
}
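// Illustrative usage sketch (editorial addition, not part of the original
// header): the tri-state return value distinguishes "attribute missing" from
// "an error was raised".
//
//   PyObject *value;
//   int rc = PyObject_GetOptionalAttr(obj, attr_name, &value);
//   if (rc < 0) {
//       /* error: an exception is set */
//   }
//   else if (rc == 0) {
//       /* attribute does not exist; no exception is set */
//   }
//   else {
//       /* use value, then release the strong reference */
//       Py_DECREF(value);
//   }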
#endif
// gh-106307 added PyObject_GetOptionalAttr() and
// PyMapping_GetOptionalItemString() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int
PyMapping_GetOptionalItem(PyObject *obj, PyObject *key, PyObject **result)
{
*result = PyObject_GetItem(obj, key);
if (*result) {
return 1;
}
if (!PyErr_ExceptionMatches(PyExc_KeyError)) {
return -1;
}
PyErr_Clear();
return 0;
}
static inline int
PyMapping_GetOptionalItemString(PyObject *obj, const char *key, PyObject **result)
{
PyObject *key_obj;
int rc;
#if PY_VERSION_HEX >= 0x03000000
key_obj = PyUnicode_FromString(key);
#else
key_obj = PyString_FromString(key);
#endif
if (key_obj == NULL) {
*result = NULL;
return -1;
}
rc = PyMapping_GetOptionalItem(obj, key_obj, result);
Py_DECREF(key_obj);
return rc;
}
#endif
// gh-108511 added PyMapping_HasKeyWithError() and
// PyMapping_HasKeyStringWithError() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int
PyMapping_HasKeyWithError(PyObject *obj, PyObject *key)
{
PyObject *res;
int rc = PyMapping_GetOptionalItem(obj, key, &res);
Py_XDECREF(res);
return rc;
}
static inline int
PyMapping_HasKeyStringWithError(PyObject *obj, const char *key)
{
PyObject *res;
int rc = PyMapping_GetOptionalItemString(obj, key, &res);
Py_XDECREF(res);
return rc;
}
#endif
// gh-108511 added PyObject_HasAttrWithError() and
// PyObject_HasAttrStringWithError() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int
PyObject_HasAttrWithError(PyObject *obj, PyObject *attr)
{
PyObject *res;
int rc = PyObject_GetOptionalAttr(obj, attr, &res);
Py_XDECREF(res);
return rc;
}
static inline int
PyObject_HasAttrStringWithError(PyObject *obj, const char *attr)
{
PyObject *res;
int rc = PyObject_GetOptionalAttrString(obj, attr, &res);
Py_XDECREF(res);
return rc;
}
#endif
// gh-106004 added PyDict_GetItemRef() and PyDict_GetItemStringRef()
// to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int
PyDict_GetItemRef(PyObject *mp, PyObject *key, PyObject **result)
{
#if PY_VERSION_HEX >= 0x03000000
PyObject *item = PyDict_GetItemWithError(mp, key);
#else
PyObject *item = _PyDict_GetItemWithError(mp, key);
#endif
if (item != NULL) {
*result = Py_NewRef(item);
return 1; // found
}
if (!PyErr_Occurred()) {
*result = NULL;
return 0; // not found
}
*result = NULL;
return -1;
}
static inline int
PyDict_GetItemStringRef(PyObject *mp, const char *key, PyObject **result)
{
int res;
#if PY_VERSION_HEX >= 0x03000000
PyObject *key_obj = PyUnicode_FromString(key);
#else
PyObject *key_obj = PyString_FromString(key);
#endif
if (key_obj == NULL) {
*result = NULL;
return -1;
}
res = PyDict_GetItemRef(mp, key_obj, result);
Py_DECREF(key_obj);
return res;
}
#endif
// gh-106307 added PyModule_Add() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int
PyModule_Add(PyObject *mod, const char *name, PyObject *value)
{
int res = PyModule_AddObjectRef(mod, name, value);
Py_XDECREF(value);
return res;
}
#endif
// gh-108014 added Py_IsFinalizing() to Python 3.13.0a1
// bpo-1856 added _Py_Finalizing to Python 3.2.1b1.
// _Py_IsFinalizing() was added to PyPy 7.3.0.
#if (0x030201B1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030D00A1) \
&& (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x7030000)
static inline int Py_IsFinalizing(void)
{
#if PY_VERSION_HEX >= 0x030700A1
// _Py_IsFinalizing() was added to Python 3.7.0a1.
return _Py_IsFinalizing();
#else
return (_Py_Finalizing != NULL);
#endif
}
#endif
// gh-108323 added PyDict_ContainsString() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int PyDict_ContainsString(PyObject *op, const char *key)
{
PyObject *key_obj = PyUnicode_FromString(key);
if (key_obj == NULL) {
return -1;
}
int res = PyDict_Contains(op, key_obj);
Py_DECREF(key_obj);
return res;
}
#endif
// gh-108445 added PyLong_AsInt() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int PyLong_AsInt(PyObject *obj)
{
#ifdef PYPY_VERSION
long value = PyLong_AsLong(obj);
if (value == -1 && PyErr_Occurred()) {
return -1;
}
if (value < (long)INT_MIN || (long)INT_MAX < value) {
PyErr_SetString(PyExc_OverflowError,
"Python int too large to convert to C int");
return -1;
}
return (int)value;
#else
return _PyLong_AsInt(obj);
#endif
}
#endif
// gh-107073 added PyObject_VisitManagedDict() to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int
PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg)
{
PyObject **dict = _PyObject_GetDictPtr(obj);
if (*dict == NULL) {
return -1;
}
Py_VISIT(*dict);
return 0;
}
static inline void
PyObject_ClearManagedDict(PyObject *obj)
{
PyObject **dict = _PyObject_GetDictPtr(obj);
if (*dict == NULL) {
return;
}
Py_CLEAR(*dict);
}
#endif
// gh-108867 added PyThreadState_GetUnchecked() to Python 3.13.0a1
// Python 3.5.2 added _PyThreadState_UncheckedGet().
#if PY_VERSION_HEX >= 0x03050200 && PY_VERSION_HEX < 0x030D00A1
static inline PyThreadState*
PyThreadState_GetUnchecked(void)
{
return _PyThreadState_UncheckedGet();
}
#endif
// gh-110289 added PyUnicode_EqualToUTF8() and PyUnicode_EqualToUTF8AndSize()
// to Python 3.13.0a1
#if PY_VERSION_HEX < 0x030D00A1
static inline int
PyUnicode_EqualToUTF8AndSize(PyObject *unicode, const char *str, Py_ssize_t str_len)
{
Py_ssize_t len;
const void *utf8;
PyObject *exc_type, *exc_value, *exc_tb;
int res;
// API cannot report errors so save/restore the exception
PyErr_Fetch(&exc_type, &exc_value, &exc_tb);
// Python 3.3.0a1 added PyUnicode_AsUTF8AndSize()
#if PY_VERSION_HEX >= 0x030300A1
if (PyUnicode_IS_ASCII(unicode)) {
utf8 = PyUnicode_DATA(unicode);
len = PyUnicode_GET_LENGTH(unicode);
}
else {
utf8 = PyUnicode_AsUTF8AndSize(unicode, &len);
if (utf8 == NULL) {
// Memory allocation failure. The API cannot report error,
// so ignore the exception and return 0.
res = 0;
goto done;
}
}
if (len != str_len) {
res = 0;
goto done;
}
res = (memcmp(utf8, str, (size_t)len) == 0);
#else
PyObject *bytes = PyUnicode_AsUTF8String(unicode);
if (bytes == NULL) {
// Memory allocation failure. The API cannot report error,
// so ignore the exception and return 0.
res = 0;
goto done;
}
#if PY_VERSION_HEX >= 0x03000000
len = PyBytes_GET_SIZE(bytes);
utf8 = PyBytes_AS_STRING(bytes);
#else
len = PyString_GET_SIZE(bytes);
utf8 = PyString_AS_STRING(bytes);
#endif
if (len != str_len) {
Py_DECREF(bytes);
res = 0;
goto done;
}
res = (memcmp(utf8, str, (size_t)len) == 0);
Py_DECREF(bytes);
#endif
done:
PyErr_Restore(exc_type, exc_value, exc_tb);
return res;
}
static inline int
PyUnicode_EqualToUTF8(PyObject *unicode, const char *str)
{
return PyUnicode_EqualToUTF8AndSize(unicode, str, (Py_ssize_t)strlen(str));
}
#endif
// gh-111138 added PyList_Extend() and PyList_Clear() to Python 3.13.0a2
#if PY_VERSION_HEX < 0x030D00A2
static inline int
PyList_Extend(PyObject *list, PyObject *iterable)
{
return PyList_SetSlice(list, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, iterable);
}
static inline int
PyList_Clear(PyObject *list)
{
return PyList_SetSlice(list, 0, PY_SSIZE_T_MAX, NULL);
}
#endif
// gh-111262 added PyDict_Pop() and PyDict_PopString() to Python 3.13.0a2
#if PY_VERSION_HEX < 0x030D00A2
static inline int
PyDict_Pop(PyObject *dict, PyObject *key, PyObject **result)
{
PyObject *value;
if (!PyDict_Check(dict)) {
PyErr_BadInternalCall();
if (result) {
*result = NULL;
}
return -1;
}
// bpo-16991 added _PyDict_Pop() to Python 3.5.0b2.
// Python 3.6.0b3 changed _PyDict_Pop() first argument type to PyObject*.
// Python 3.13.0a1 removed _PyDict_Pop().
#if defined(PYPY_VERSION) || PY_VERSION_HEX < 0x030500b2 || PY_VERSION_HEX >= 0x030D0000
value = PyObject_CallMethod(dict, "pop", "O", key);
#elif PY_VERSION_HEX < 0x030600b3
value = _PyDict_Pop(_Py_CAST(PyDictObject*, dict), key, NULL);
#else
value = _PyDict_Pop(dict, key, NULL);
#endif
if (value == NULL) {
if (result) {
*result = NULL;
}
if (PyErr_Occurred() && !PyErr_ExceptionMatches(PyExc_KeyError)) {
return -1;
}
PyErr_Clear();
return 0;
}
if (result) {
*result = value;
}
else {
Py_DECREF(value);
}
return 1;
}
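// Illustrative usage sketch (editorial addition, not part of the original
// header): PyDict_Pop() returns 1 if the key was removed, 0 if it was missing,
// and -1 on error.
//
//   PyObject *value;
//   int rc = PyDict_Pop(dict, key, &value);
//   if (rc < 0) {
//       /* error: an exception is set */
//   }
//   else if (rc == 0) {
//       /* key was not present; value is NULL and no exception is set */
//   }
//   else {
//       /* key removed; value holds a strong reference */
//       Py_DECREF(value);
//   }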
static inline int
PyDict_PopString(PyObject *dict, const char *key, PyObject **result)
{
PyObject *key_obj = PyUnicode_FromString(key);
if (key_obj == NULL) {
if (result != NULL) {
*result = NULL;
}
return -1;
}
int res = PyDict_Pop(dict, key_obj, result);
Py_DECREF(key_obj);
return res;
}
#endif
#if PY_VERSION_HEX < 0x030200A4
// Python 3.2.0a4 added Py_hash_t type
typedef Py_ssize_t Py_hash_t;
#endif
// gh-111545 added Py_HashPointer() to Python 3.13.0a3
#if PY_VERSION_HEX < 0x030D00A3
static inline Py_hash_t Py_HashPointer(const void *ptr)
{
#if PY_VERSION_HEX >= 0x030900A4 && !defined(PYPY_VERSION)
return _Py_HashPointer(ptr);
#else
return _Py_HashPointer(_Py_CAST(void*, ptr));
#endif
}
#endif
// Python 3.13a4 added a PyTime API.
// Use the private API added to Python 3.5.
#if PY_VERSION_HEX < 0x030D00A4 && PY_VERSION_HEX >= 0x03050000
typedef _PyTime_t PyTime_t;
#define PyTime_MIN _PyTime_MIN
#define PyTime_MAX _PyTime_MAX
static inline double PyTime_AsSecondsDouble(PyTime_t t)
{ return _PyTime_AsSecondsDouble(t); }
static inline int PyTime_Monotonic(PyTime_t *result)
{ return _PyTime_GetMonotonicClockWithInfo(result, NULL); }
static inline int PyTime_Time(PyTime_t *result)
{ return _PyTime_GetSystemClockWithInfo(result, NULL); }
static inline int PyTime_PerfCounter(PyTime_t *result)
{
#if PY_VERSION_HEX >= 0x03070000 && !defined(PYPY_VERSION)
return _PyTime_GetPerfCounterWithInfo(result, NULL);
#elif PY_VERSION_HEX >= 0x03070000
// Call time.perf_counter_ns() and convert Python int object to PyTime_t.
// Cache time.perf_counter_ns() function for best performance.
static PyObject *func = NULL;
if (func == NULL) {
PyObject *mod = PyImport_ImportModule("time");
if (mod == NULL) {
return -1;
}
func = PyObject_GetAttrString(mod, "perf_counter_ns");
Py_DECREF(mod);
if (func == NULL) {
return -1;
}
}
PyObject *res = PyObject_CallNoArgs(func);
if (res == NULL) {
return -1;
}
long long value = PyLong_AsLongLong(res);
Py_DECREF(res);
if (value == -1 && PyErr_Occurred()) {
return -1;
}
Py_BUILD_ASSERT(sizeof(value) >= sizeof(PyTime_t));
*result = (PyTime_t)value;
return 0;
#else
// Call time.perf_counter() and convert C double to PyTime_t.
// Cache time.perf_counter() function for best performance.
static PyObject *func = NULL;
if (func == NULL) {
PyObject *mod = PyImport_ImportModule("time");
if (mod == NULL) {
return -1;
}
func = PyObject_GetAttrString(mod, "perf_counter");
Py_DECREF(mod);
if (func == NULL) {
return -1;
}
}
PyObject *res = PyObject_CallNoArgs(func);
if (res == NULL) {
return -1;
}
double d = PyFloat_AsDouble(res);
Py_DECREF(res);
if (d == -1.0 && PyErr_Occurred()) {
return -1;
}
// Avoid floor() to avoid having to link to libm
*result = (PyTime_t)(d * 1e9);
return 0;
#endif
}
#endif
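// Illustrative usage sketch (editorial addition, not part of the original
// header): timing a block of work with the PyTime shims defined above.
//
//   PyTime_t t0, t1;
//   if (PyTime_PerfCounter(&t0) < 0) { /* error */ }
//   /* ... work to be measured ... */
//   if (PyTime_PerfCounter(&t1) < 0) { /* error */ }
//   double seconds = PyTime_AsSecondsDouble(t1 - t0);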
// gh-111389 added hash constants to Python 3.13.0a5. These constants were
// added first as private macros to Python 3.4.0b1 and PyPy 7.3.9.
#if (!defined(PyHASH_BITS) \
&& ((!defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x030400B1) \
|| (defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x03070000 \
&& PYPY_VERSION_NUM >= 0x07090000)))
# define PyHASH_BITS _PyHASH_BITS
# define PyHASH_MODULUS _PyHASH_MODULUS
# define PyHASH_INF _PyHASH_INF
# define PyHASH_IMAG _PyHASH_IMAG
#endif
// gh-111545 added Py_GetConstant() and Py_GetConstantBorrowed()
// to Python 3.13.0a6
#if PY_VERSION_HEX < 0x030D00A6 && !defined(Py_CONSTANT_NONE)
#define Py_CONSTANT_NONE 0
#define Py_CONSTANT_FALSE 1
#define Py_CONSTANT_TRUE 2
#define Py_CONSTANT_ELLIPSIS 3
#define Py_CONSTANT_NOT_IMPLEMENTED 4
#define Py_CONSTANT_ZERO 5
#define Py_CONSTANT_ONE 6
#define Py_CONSTANT_EMPTY_STR 7
#define Py_CONSTANT_EMPTY_BYTES 8
#define Py_CONSTANT_EMPTY_TUPLE 9
static inline PyObject* Py_GetConstant(unsigned int constant_id)
{
static PyObject* constants[Py_CONSTANT_EMPTY_TUPLE + 1] = {NULL};
if (constants[Py_CONSTANT_NONE] == NULL) {
constants[Py_CONSTANT_NONE] = Py_None;
constants[Py_CONSTANT_FALSE] = Py_False;
constants[Py_CONSTANT_TRUE] = Py_True;
constants[Py_CONSTANT_ELLIPSIS] = Py_Ellipsis;
constants[Py_CONSTANT_NOT_IMPLEMENTED] = Py_NotImplemented;
constants[Py_CONSTANT_ZERO] = PyLong_FromLong(0);
if (constants[Py_CONSTANT_ZERO] == NULL) {
goto fatal_error;
}
constants[Py_CONSTANT_ONE] = PyLong_FromLong(1);
if (constants[Py_CONSTANT_ONE] == NULL) {
goto fatal_error;
}
constants[Py_CONSTANT_EMPTY_STR] = PyUnicode_FromStringAndSize("", 0);
if (constants[Py_CONSTANT_EMPTY_STR] == NULL) {
goto fatal_error;
}
constants[Py_CONSTANT_EMPTY_BYTES] = PyBytes_FromStringAndSize("", 0);
if (constants[Py_CONSTANT_EMPTY_BYTES] == NULL) {
goto fatal_error;
}
constants[Py_CONSTANT_EMPTY_TUPLE] = PyTuple_New(0);
if (constants[Py_CONSTANT_EMPTY_TUPLE] == NULL) {
goto fatal_error;
}
// goto dance to avoid compiler warnings about Py_FatalError()
goto init_done;
fatal_error:
// This case should never happen
Py_FatalError("Py_GetConstant() failed to get constants");
}
init_done:
if (constant_id <= Py_CONSTANT_EMPTY_TUPLE) {
return Py_NewRef(constants[constant_id]);
}
else {
PyErr_BadInternalCall();
return NULL;
}
}
static inline PyObject* Py_GetConstantBorrowed(unsigned int constant_id)
{
PyObject *obj = Py_GetConstant(constant_id);
Py_XDECREF(obj);
return obj;
}
#endif
// gh-114329 added PyList_GetItemRef() to Python 3.13.0a4
#if PY_VERSION_HEX < 0x030D00A4
static inline PyObject *
PyList_GetItemRef(PyObject *op, Py_ssize_t index)
{
PyObject *item = PyList_GetItem(op, index);
Py_XINCREF(item);
return item;
}
#endif
// PyDict_SetDefaultRef() was added to Python 3.13.0a4
#if PY_VERSION_HEX < 0x030D00A4
static inline int
PyDict_SetDefaultRef(PyObject *d, PyObject *key, PyObject *default_value,
PyObject **result)
{
PyObject *value;
if (PyDict_GetItemRef(d, key, &value) < 0) {
// get error
if (result) {
*result = NULL;
}
return -1;
}
if (value != NULL) {
// present
if (result) {
*result = value;
}
else {
Py_DECREF(value);
}
return 1;
}
// missing: set the item
if (PyDict_SetItem(d, key, default_value) < 0) {
// set error
if (result) {
*result = NULL;
}
return -1;
}
if (result) {
*result = Py_NewRef(default_value);
}
return 0;
}
#endif
#if PY_VERSION_HEX < 0x030D00B3
# define Py_BEGIN_CRITICAL_SECTION(op) {
# define Py_END_CRITICAL_SECTION() }
# define Py_BEGIN_CRITICAL_SECTION2(a, b) {
# define Py_END_CRITICAL_SECTION2() }
#endif
#if PY_VERSION_HEX < 0x030E0000 && PY_VERSION_HEX >= 0x03060000 && !defined(PYPY_VERSION)
typedef struct PyUnicodeWriter PyUnicodeWriter;
static inline void PyUnicodeWriter_Discard(PyUnicodeWriter *writer)
{
_PyUnicodeWriter_Dealloc((_PyUnicodeWriter*)writer);
PyMem_Free(writer);
}
static inline PyUnicodeWriter* PyUnicodeWriter_Create(Py_ssize_t length)
{
if (length < 0) {
PyErr_SetString(PyExc_ValueError,
"length must be positive");
return NULL;
}
const size_t size = sizeof(_PyUnicodeWriter);
PyUnicodeWriter *pub_writer = (PyUnicodeWriter *)PyMem_Malloc(size);
if (pub_writer == _Py_NULL) {
PyErr_NoMemory();
return _Py_NULL;
}
_PyUnicodeWriter *writer = (_PyUnicodeWriter *)pub_writer;
_PyUnicodeWriter_Init(writer);
if (_PyUnicodeWriter_Prepare(writer, length, 127) < 0) {
PyUnicodeWriter_Discard(pub_writer);
return NULL;
}
writer->overallocate = 1;
return pub_writer;
}
static inline PyObject* PyUnicodeWriter_Finish(PyUnicodeWriter *writer)
{
PyObject *str = _PyUnicodeWriter_Finish((_PyUnicodeWriter*)writer);
assert(((_PyUnicodeWriter*)writer)->buffer == NULL);
PyMem_Free(writer);
return str;
}
static inline int
PyUnicodeWriter_WriteChar(PyUnicodeWriter *writer, Py_UCS4 ch)
{
if (ch > 0x10ffff) {
PyErr_SetString(PyExc_ValueError,
"character must be in range(0x110000)");
return -1;
}
return _PyUnicodeWriter_WriteChar((_PyUnicodeWriter*)writer, ch);
}
static inline int
PyUnicodeWriter_WriteStr(PyUnicodeWriter *writer, PyObject *obj)
{
PyObject *str = PyObject_Str(obj);
if (str == NULL) {
return -1;
}
int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str);
Py_DECREF(str);
return res;
}
static inline int
PyUnicodeWriter_WriteRepr(PyUnicodeWriter *writer, PyObject *obj)
{
PyObject *str = PyObject_Repr(obj);
if (str == NULL) {
return -1;
}
int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str);
Py_DECREF(str);
return res;
}
static inline int
PyUnicodeWriter_WriteUTF8(PyUnicodeWriter *writer,
const char *str, Py_ssize_t size)
{
if (size < 0) {
size = (Py_ssize_t)strlen(str);
}
PyObject *str_obj = PyUnicode_FromStringAndSize(str, size);
if (str_obj == _Py_NULL) {
return -1;
}
int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj);
Py_DECREF(str_obj);
return res;
}
static inline int
PyUnicodeWriter_WriteWideChar(PyUnicodeWriter *writer,
const wchar_t *str, Py_ssize_t size)
{
if (size < 0) {
size = (Py_ssize_t)wcslen(str);
}
PyObject *str_obj = PyUnicode_FromWideChar(str, size);
if (str_obj == _Py_NULL) {
return -1;
}
int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj);
Py_DECREF(str_obj);
return res;
}
static inline int
PyUnicodeWriter_WriteSubstring(PyUnicodeWriter *writer, PyObject *str,
Py_ssize_t start, Py_ssize_t end)
{
if (!PyUnicode_Check(str)) {
PyErr_Format(PyExc_TypeError, "expect str, not %T", str);
return -1;
}
if (start < 0 || start > end) {
PyErr_Format(PyExc_ValueError, "invalid start argument");
return -1;
}
if (end > PyUnicode_GET_LENGTH(str)) {
PyErr_Format(PyExc_ValueError, "invalid end argument");
return -1;
}
return _PyUnicodeWriter_WriteSubstring((_PyUnicodeWriter*)writer, str,
start, end);
}
static inline int
PyUnicodeWriter_Format(PyUnicodeWriter *writer, const char *format, ...)
{
va_list vargs;
va_start(vargs, format);
PyObject *str = PyUnicode_FromFormatV(format, vargs);
va_end(vargs);
if (str == _Py_NULL) {
return -1;
}
int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str);
Py_DECREF(str);
return res;
}
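// Illustrative usage sketch (editorial addition, not part of the original
// header): build a string incrementally with the PyUnicodeWriter shims above.
//
//   PyUnicodeWriter *w = PyUnicodeWriter_Create(0);
//   if (w == NULL) { /* error */ }
//   if (PyUnicodeWriter_WriteUTF8(w, "answer = ", -1) < 0
//       || PyUnicodeWriter_Format(w, "%d", 42) < 0) {
//       PyUnicodeWriter_Discard(w);
//       /* error */
//   }
//   PyObject *str = PyUnicodeWriter_Finish(w);  /* consumes the writer */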
#endif // PY_VERSION_HEX < 0x030E0000
// gh-116560 added PyLong_GetSign() to Python 3.14.0a0
#if PY_VERSION_HEX < 0x030E00A0
static inline int PyLong_GetSign(PyObject *obj, int *sign)
{
if (!PyLong_Check(obj)) {
PyErr_Format(PyExc_TypeError, "expect int, got %s", Py_TYPE(obj)->tp_name);
return -1;
}
*sign = _PyLong_Sign(obj);
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif // PYTHONCAPI_COMPAT
```
|
====================================================================================================================================
SOURCE CODE FILE: schema_info.h
LINES: 1
SIZE: 3.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\schema_info.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/frontend/function_schema_parser.h>
#include <unordered_set>
namespace torch::utils {
using SchemaSpecialCasePair =
std::pair<c10::FunctionSchema, std::unordered_set<std::string>>;
/**
* class SchemaInfo
*
* FunctionSchema wrapper that publicizes argument value specific operator
* behavior (mutation, aliasing, special cases, etc...)
*/
struct TORCH_API SchemaInfo {
public:
explicit SchemaInfo(c10::FunctionSchema schema)
: schema_(std::move(schema)),
alias_maps_current_(false),
has_init_(false) {}
explicit SchemaInfo(const char* signature)
: schema_(torch::jit::parseSchema(signature)),
alias_maps_current_(false),
has_init_(false) {}
bool is_mutable();
bool is_mutable(const c10::SchemaArgument& argument);
bool is_mutable(std::string_view name);
bool has_argument(std::string_view name);
bool is_nondeterministic() const;
// Returns whether lhs and rhs may alias directly.
// This does not account for cases where lhs or rhs are a container that
// may contain elements that alias the other argument.
// Besides the checks already included in FunctionSchema::may_alias, this
// method also accounts for special aliasing cases caused by aliasing argument
// values supplied from addArgumentValue.
bool may_alias(
const c10::SchemaArgument& lhs,
const c10::SchemaArgument& rhs);
// Returns whether lhs and rhs may alias directly or whether lhs/rhs are a
// container that may contain elements that alias the other argument. Besides
// the checks already included in FunctionSchema::may_contain_alias, this
// method also accounts for special aliasing cases caused by aliasing argument
// values supplied from addArgumentValue. bidirectional = false only returns
// whether lhs may contain an alias of rhs while bidirectional = true returns
// both directions.
bool may_contain_alias(
const c10::SchemaArgument& lhs,
const c10::SchemaArgument& rhs,
bool bidirectional = true);
void addArgumentValue(const std::string& name, const at::IValue& value);
void addArgumentValues(
const std::vector<std::optional<at::IValue>>& value_list);
void addArgumentValues(
const std::unordered_map<std::string, at::IValue>& values);
bool hasInputArgumentNamed(const std::string& name) const;
private:
// This function enforces more conservative results when the TORCH_WARN is
// triggered from above due to duplicates in an argument list
void ensureConservativity(
const std::unordered_set<at::Symbol>& duplicates,
const std::vector<c10::Argument>& arguments_list,
c10::SchemaArgType type);
void initSchemaInfo();
void generateAliasMaps();
bool mayContainAliasImpl(
const c10::SchemaArgument& lhs,
const c10::SchemaArgument& rhs);
static std::vector<c10::FunctionSchema> getNonDeterministicOps();
static std::vector<SchemaSpecialCasePair> getTrainingOps();
const std::unordered_set<c10::SchemaArgument>& wildcardSet();
const std::unordered_set<c10::SchemaArgument>& containerSet();
// Set of all wildcard arguments
std::unordered_set<c10::SchemaArgument> wildcard_set_;
// Set of all container arguments
std::unordered_set<c10::SchemaArgument> container_set_;
// Map of argument IValues
std::unordered_map<std::string, at::IValue> value_map_;
// Alias map of inputs with each other
std::vector<std::unordered_set<size_t>> input_alias_map_;
// Alias map of outputs to inputs
std::vector<std::unordered_set<size_t>> output_alias_map_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const c10::FunctionSchema schema_;
bool alias_maps_current_;
bool has_init_;
};
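// Illustrative usage sketch (editorial addition; the schema string and the
// expected results are examples only, not taken from this header):
//
//   torch::utils::SchemaInfo info(
//       "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)");
//   bool mutates_self = info.is_mutable("self");  // self is written in-place
//   bool aliases = info.may_alias(
//       {c10::SchemaArgType::input, 0},
//       {c10::SchemaArgType::output, 0});         // the output aliases self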
} // namespace torch::utils
```
|
============================================================================================================================
SOURCE CODE FILE: six.h
LINES: 1
SIZE: 1.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\six.h
ENCODING: utf-8
```h
#pragma once
#include <pybind11/pybind11.h>
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/utils/structseq.h>
namespace six {
// Usually an instance of PyStructSequence is also an instance of tuple,
// but in some Python 2 environments it is not, so we have to manually check
// the type's __module__ to determine whether it is a namedtuple returned
// by a PyTorch operator.
inline bool isStructSeq(pybind11::handle input) {
return pybind11::cast<std::string>(input.get_type().attr("__module__")) ==
"torch.return_types";
}
inline bool isStructSeq(PyObject* obj) {
return isStructSeq(pybind11::handle(obj));
}
inline bool isTuple(pybind11::handle input) {
if (PyTuple_Check(input.ptr())) {
return true;
}
return false;
}
inline bool isTuple(PyObject* obj) {
return isTuple(pybind11::handle(obj));
}
// maybeAsTuple: if the input is a structseq, then convert it to a tuple
//
// On Python 3, structseq is a subtype of tuple, so these APIs could be used
// directly. But on Python 2, structseq is not a subtype of tuple, so we need to
// manually create a new tuple object from structseq.
inline THPObjectPtr maybeAsTuple(PyStructSequence* obj) {
Py_INCREF(obj);
return THPObjectPtr((PyObject*)obj);
}
inline THPObjectPtr maybeAsTuple(PyObject* obj) {
if (isStructSeq(obj))
return maybeAsTuple((PyStructSequence*)obj);
Py_INCREF(obj);
return THPObjectPtr(obj);
}
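// Illustrative usage sketch (editorial addition; `result` is a hypothetical
// PyObject* returned by an operator):
//
//   THPObjectPtr maybe_tuple = six::maybeAsTuple(result);
//   if (six::isTuple(maybe_tuple.get())) {
//     Py_ssize_t n = PyTuple_GET_SIZE(maybe_tuple.get());
//     // ... iterate over the n elements ...
//   }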
} // namespace six
```
|
==================================================================================================================================
SOURCE CODE FILE: structseq.h
LINES: 1
SIZE: 0.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\structseq.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
namespace torch::utils {
PyObject* returned_structseq_repr(PyStructSequence* obj);
}
```
|
=====================================================================================================================================
SOURCE CODE FILE: tensor_apply.h
LINES: 1
SIZE: 0.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_apply.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Tensor.h>
#include <torch/csrc/python_headers.h>
namespace torch::utils {
const at::Tensor& apply_(const at::Tensor& self, PyObject* fn);
const at::Tensor& map_(
const at::Tensor& self,
const at::Tensor& other_,
PyObject* fn);
const at::Tensor& map2_(
const at::Tensor& self,
const at::Tensor& x_,
const at::Tensor& y_,
PyObject* fn);
} // namespace torch::utils
```
|
======================================================================================================================================
SOURCE CODE FILE: tensor_dtypes.h
LINES: 1
SIZE: 0.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_dtypes.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <string>
#include <tuple>
namespace torch::utils {
std::pair<std::string, std::string> getDtypeNames(at::ScalarType scalarType);
void initializeDtypes();
} // namespace torch::utils
```
|
=======================================================================================================================================
SOURCE CODE FILE: tensor_flatten.h
LINES: 1
SIZE: 2.76 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_flatten.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/functional.h>
#include <c10/core/TensorOptions.h>
#include <torch/csrc/Export.h>
#include <utility>
namespace torch::utils {
/// Generate an ID for a combination of tensor backend + scalar type to be used
/// when ordering tensors ('like' tensors are grouped by pulling out their
/// backend + scalar type, so this function combines that into a single number)
inline size_t type_id(const at::Tensor& tensor) {
return static_cast<size_t>(tensor.options().backend()) *
static_cast<size_t>(at::ScalarType::NumOptions) +
static_cast<size_t>(tensor.scalar_type());
}
inline at::Tensor flatten_dense_tensors(at::TensorList tensors) {
return at::flatten_dense_tensors(tensors);
}
inline std::vector<at::Tensor> unflatten_dense_tensors(
const at::Tensor& flat,
at::TensorList tensors) {
return at::unflatten_dense_tensors(flat, tensors);
}
struct TensorGroup {
std::vector<at::Tensor> tensors;
size_t size = 0;
size_t type_id() {
AT_ASSERT(!tensors.empty());
return ::torch::utils::type_id(tensors[0]);
}
const at::TensorOptions options() {
AT_ASSERT(!tensors.empty());
return tensors[0].options();
}
};
// Helper function that takes a list of tensors and splits them into tensor
// groups by the size limit and outputs these tensor groups. If the input
// tensors are of different tensor types, they will be split into different
// groups as well.
//
// Two splitting options are provided to the user.
// Imagine the size_limit is 256 and the list of input tensors is:
// Imagine the size_limit is 256 and the list of input tensors are:
// tensor_a(fp16 - 128 bytes),
// tensor_b(fp32 - 256 bytes),
// tensor_c(fp16 - 128 bytes),
//
// when fine_grained == false:
// The function will read the list of tensors sequentially and accumulate
// enough tensors for each data type until the size_limit, therefore:
// it will output: {{tensor_a, tensor_c}, {tensor_b}}
//
// when fine_grained == true:
// The function will read the list of tensors sequentially and accumulate
// enough tensors for all data types until the size_limit, and then split
// the accumulated tensors into different groups by data types, therefore:
// it will output: {{tensor_a}, {tensor_b}, {tensor_c}}
TORCH_API std::vector<TensorGroup> take_tensors(
at::TensorList tensors,
size_t size_limit,
bool fine_grained = false);
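// Illustrative usage sketch (editorial addition; `params` is a hypothetical
// list of dense tensors):
//
//   std::vector<at::Tensor> params = ...;
//   auto groups = torch::utils::take_tensors(params, /*size_limit=*/256);
//   for (auto& group : groups) {
//     at::Tensor flat = torch::utils::flatten_dense_tensors(group.tensors);
//     // ... process / communicate `flat` as one contiguous buffer ...
//     auto unflat =
//         torch::utils::unflatten_dense_tensors(flat, group.tensors);
//   }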
TORCH_API void reorder_tensors_like(
std::vector<at::Tensor>& tensors,
at::TensorList order);
TORCH_API std::pair<at::Tensor, at::Tensor> flatten_sparse_tensors(
at::TensorList tensors);
TORCH_API std::vector<at::Tensor> unflatten_sparse_tensors(
const at::Tensor& flat_indices,
const at::Tensor& flat_values,
at::TensorList tensors);
} // namespace torch::utils
```
|
=======================================================================================================================================
SOURCE CODE FILE: tensor_layouts.h
LINES: 1
SIZE: 0.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_layouts.h
ENCODING: utf-8
```h
#pragma once
namespace torch::utils {
void initializeLayouts();
}
```
|
====================================================================================================================================
SOURCE CODE FILE: tensor_list.h
LINES: 1
SIZE: 0.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_list.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
namespace at {
class Tensor;
}
namespace torch::utils {
PyObject* tensor_to_list(const at::Tensor& tensor);
}
```
|
=============================================================================================================================================
SOURCE CODE FILE: tensor_memoryformats.h
LINES: 1
SIZE: 0.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_memoryformats.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/MemoryFormat.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/utils/python_stub.h>
namespace torch::utils {
void initializeMemoryFormats();
// This method returns a borrowed reference!
TORCH_PYTHON_API PyObject* getTHPMemoryFormat(c10::MemoryFormat);
} // namespace torch::utils
```
|
===================================================================================================================================
SOURCE CODE FILE: tensor_new.h
LINES: 1
SIZE: 4.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_new.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/python_arg_parser.h>
#include <ATen/core/Tensor.h>
namespace torch::utils {
// NOTE: [torch.tensor, lift_fresh, and device movement]
//
// The `only_lift_cpu_tensors` flag controls what happens on torch.tensor([1, 2,
// 3], device="cuda") (or any other non-CPU device).
//
// If false (default):
// - the data gets moved into a CPU Tensor
// - then, it gets moved to cuda (via .to)
// - finally, we call lift_fresh() on it.
// Steps 1 and 2 happen with all modes disabled.
//
// If true:
// - the data gets moved into a CPU Tensor (with correct dtype)
// - we call lift_fresh() on it
// - finally, we move it to cuda (via .to)
// Step 1 happens with all modes disabled.
//
// `only_lift_cpu_tensors=true` is useful to prevent CUDA initialization under
// FakeTensorMode because it avoids moving concrete data to CUDA.
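// A rough sketch of the two orderings described above (editorial addition,
// illustrative only; `to_cpu_tensor` is a hypothetical helper, not an API in
// this header):
//
//   // only_lift_cpu_tensors == false
//   at::Tensor t = to_cpu_tensor(data);   // modes disabled
//   t = t.to(device);                     // modes disabled
//   t = at::lift_fresh(t);
//
//   // only_lift_cpu_tensors == true
//   at::Tensor t = to_cpu_tensor(data);   // modes disabled
//   t = at::lift_fresh(t);
//   t = t.to(device);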
TORCH_API bool only_lift_cpu_tensors();
TORCH_API void set_only_lift_cpu_tensors(bool value);
at::Tensor base_tensor_ctor(PyObject* args, PyObject* kwargs);
TORCH_PYTHON_API at::Tensor legacy_tensor_ctor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
at::Tensor legacy_tensor_new(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
at::Tensor indexing_tensor_from_data(
c10::TensorOptions options,
at::ScalarType scalar_type,
std::optional<at::Device> device,
PyObject* data);
at::Tensor sparse_coo_tensor_ctor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PythonArgs& r);
void _validate_sparse_coo_tensor_args(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
at::Tensor sparse_compressed_tensor_ctor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PythonArgs& r);
at::Tensor sparse_csr_tensor_ctor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PythonArgs& r);
at::Tensor sparse_csc_tensor_ctor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PythonArgs& r);
at::Tensor sparse_bsr_tensor_ctor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PythonArgs& r);
at::Tensor sparse_bsc_tensor_ctor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PythonArgs& r);
void _validate_sparse_compressed_tensor_args(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
void _validate_sparse_csr_tensor_args(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
void _validate_sparse_csc_tensor_args(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
void _validate_sparse_bsr_tensor_args(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
void _validate_sparse_bsc_tensor_args(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
at::Tensor tensor_ctor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PythonArgs& r);
at::Tensor as_tensor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PythonArgs& r);
at::Tensor new_tensor(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
at::Tensor new_ones(
c10::DispatchKey dispatch_key,
at::ScalarType scalar_type,
PyObject* args,
PyObject* kwargs);
at::Tensor tensor_frombuffer(
PyObject* buffer,
at::ScalarType dtype,
int64_t count,
int64_t offset,
bool requires_grad);
at::Tensor tensor_fromDLPack(PyObject* data);
at::Tensor asarray(
PyObject* obj,
std::optional<c10::ScalarType> dtype,
std::optional<c10::Device> device,
std::optional<bool> copy,
bool requires_grad);
} // namespace torch::utils
```
|
=====================================================================================================================================
SOURCE CODE FILE: tensor_numpy.h
LINES: 1
SIZE: 0.72 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_numpy.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Tensor.h>
#include <torch/csrc/python_headers.h>
namespace torch::utils {
PyObject* tensor_to_numpy(const at::Tensor& tensor, bool force = false);
at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable = true);
int aten_to_numpy_dtype(const at::ScalarType scalar_type);
at::ScalarType numpy_dtype_to_aten(int dtype);
bool is_numpy_available();
bool is_numpy_int(PyObject* obj);
bool is_numpy_bool(PyObject* obj);
bool is_numpy_scalar(PyObject* obj);
void warn_numpy_not_writeable();
at::Tensor tensor_from_cuda_array_interface(PyObject* obj);
void validate_numpy_for_dlpack_deleter_bug();
bool is_numpy_dlpack_deleter_bugged();
} // namespace torch::utils
```
|
========================================================================================================================================
SOURCE CODE FILE: tensor_qschemes.h
LINES: 1
SIZE: 0.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_qschemes.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/QScheme.h>
namespace torch::utils {
PyObject* getTHPQScheme(at::QScheme qscheme);
void initializeQSchemes();
} // namespace torch::utils
```
|
=====================================================================================================================================
SOURCE CODE FILE: tensor_types.h
LINES: 1
SIZE: 0.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\tensor_types.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/DeprecatedTypeProperties.h>
#include <c10/core/TensorOptions.h>
#include <utility>
#include <vector>
namespace torch::utils {
std::string options_to_string(const at::TensorOptions& options);
std::string type_to_string(const at::DeprecatedTypeProperties& type);
at::TensorOptions options_from_string(const std::string& str);
// return a vector of all "declared" types, even those that weren't compiled
std::vector<std::pair<at::Backend, at::ScalarType>> all_declared_types();
// return python module name of backend, like torch.cuda, torch.foo
const char* backend_to_string(const at::Backend& backend);
} // namespace torch::utils
```
|
=================================================================================================================================================
SOURCE CODE FILE: throughput_benchmark-inl.h
LINES: 1
SIZE: 6.35 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\throughput_benchmark-inl.h
ENCODING: utf-8
```h
#pragma once
#include <random>
#include <thread>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/jit/python/pybind_utils.h>
#include <torch/csrc/utils/pybind.h>
#include <ATen/Parallel.h>
#include <ATen/autocast_mode.h>
#include <c10/core/GradMode.h>
#include <c10/core/impl/LocalDispatchKeySet.h>
#include <c10/util/irange.h>
namespace torch::throughput_benchmark::detail {
template <class Input, class Output, class Model>
BenchmarkExecutionStats BenchmarkHelper<Input, Output, Model>::benchmark(
const BenchmarkConfig& config) const {
CHECK(initialized_);
TORCH_CHECK(
config.num_worker_threads == 1,
"Only parallelization by callers is supported");
LOG(INFO) << at::get_parallel_info();
// We pre-generate inputs here for each of the threads. This allows us to
// safely move inputs out for each thread independently and thus avoid
// overhead from the benchmark runner itself.
std::vector<std::vector<Input>> thread_inputs(config.num_calling_threads);
std::vector<size_t> input_iters(config.num_calling_threads);
{
std::random_device seeder;
std::mt19937 engine(seeder());
TORCH_CHECK(
!inputs_.empty(),
"Please provide benchmark inputs."
"Did you forget to call add_input()? ");
std::uniform_int_distribution<int> dist(0, inputs_.size() - 1);
for (const auto thread_id : c10::irange(config.num_calling_threads)) {
// Just in case, we generate num_iters inputs for each of the threads.
// This way, if one thread does all the work, we will be fine.
for (const auto i [[maybe_unused]] :
c10::irange(config.num_iters + config.num_warmup_iters)) {
thread_inputs[thread_id].push_back(cloneInput(inputs_[dist(engine)]));
}
input_iters[thread_id] = 0;
}
}
std::mutex m;
std::condition_variable worker_main_cv;
std::condition_variable main_worker_cv;
// TODO: add GUARDED_BY once it is available
int64_t initialized{0};
int64_t finished{0};
bool start{false};
std::atomic<int64_t> num_attempted_iters{0};
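// The variables above implement a simple barrier: each caller thread runs its
// warmup iterations, increments `initialized` and blocks on main_worker_cv;
// once every thread has checked in, the main thread records start_time, sets
// `start` and notifies them. Workers then claim iterations by incrementing
// num_attempted_iters until config.num_iters is reached, and report
// completion via `finished` / worker_main_cv.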
std::vector<std::thread> callers;
callers.reserve(config.num_calling_threads);
static constexpr auto& DEVICES = at::autocast::_AUTOCAST_SUPPORTED_DEVICES;
std::array<bool, DEVICES.size()> autocast_enabled;
std::array<at::ScalarType, DEVICES.size()> autocast_dtype;
for (size_t i = 0; i < DEVICES.size(); i++) {
autocast_enabled[i] = at::autocast::is_autocast_enabled(DEVICES[i]);
autocast_dtype[i] = at::autocast::get_autocast_dtype(DEVICES[i]);
}
bool autocast_cache_enabled = at::autocast::is_autocast_cache_enabled();
bool tls_grad_enabled = c10::GradMode::is_enabled();
c10::impl::LocalDispatchKeySet tls_key_set =
c10::impl::tls_local_dispatch_key_set();
for (const auto thread_id : c10::irange(config.num_calling_threads)) {
callers.emplace_back([&, thread_id]() {
// We use a condition variable as a barrier to make sure each thread
// performs the required warmup iterations before we start measuring
c10::GradMode::set_enabled(tls_grad_enabled);
c10::impl::_force_tls_local_dispatch_key_set(tls_key_set);
for (size_t i = 0; i < DEVICES.size(); i++) {
at::autocast::set_autocast_enabled(DEVICES[i], autocast_enabled[i]);
at::autocast::set_autocast_dtype(DEVICES[i], autocast_dtype[i]);
}
at::autocast::set_autocast_cache_enabled(autocast_cache_enabled);
for (const auto j : c10::irange(config.num_warmup_iters)) {
(void)j;
runOnce(std::move(thread_inputs[thread_id][input_iters[thread_id]]));
++input_iters[thread_id];
}
{
std::unique_lock<std::mutex> lock(m);
++initialized;
worker_main_cv.notify_one();
// NOLINTNEXTLINE(bugprone-infinite-loop)
while (!start) {
main_worker_cv.wait(lock);
}
}
LOG(INFO) << "Starting forward thread " << thread_id;
while (num_attempted_iters.fetch_add(1) < config.num_iters) {
runOnce(std::move(thread_inputs[thread_id][input_iters[thread_id]]));
++input_iters[thread_id];
}
{
std::unique_lock<std::mutex> lock(m);
++finished;
worker_main_cv.notify_one();
LOG(INFO) << "Shutting down forward thread " << thread_id
<< ". Total number of finished threads: " << finished;
}
});
}
using Clock = std::chrono::high_resolution_clock;
using RecordProfile = torch::autograd::profiler::RecordProfile;
using TimePoint = std::chrono::time_point<Clock>;
TimePoint start_time;
std::unique_ptr<RecordProfile> profiler_guard;
{
std::unique_lock<std::mutex> lock(m);
while (initialized != config.num_calling_threads) {
worker_main_cv.wait(lock);
}
if (!config.profiler_output_path.empty()) {
LOG(INFO) << "Using Autograd profiler. Trace will be saved to "
<< config.profiler_output_path;
profiler_guard =
std::make_unique<RecordProfile>(config.profiler_output_path);
}
LOG(INFO) << "Starting threads";
start = true;
start_time = Clock::now();
}
main_worker_cv.notify_all();
{
std::unique_lock<std::mutex> lock(m);
worker_main_cv.wait(
lock, [&]() { return finished == config.num_calling_threads; });
}
auto end_time = std::chrono::high_resolution_clock::now();
profiler_guard.reset();
LOG(INFO) << "Finished benchmark";
BenchmarkExecutionStats stats;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
float total_time_ms = std::chrono::duration_cast<std::chrono::nanoseconds>(
end_time - start_time)
.count() /
1000.0 / 1000.0;
// We use config.num_iters instead of num_attempted_iters as it is
// representative of the real work done. The last attempted iteration on each
// calling thread doesn't represent real work (i.e. running the model).
stats.latency_avg_ms =
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
total_time_ms * config.num_calling_threads / config.num_iters;
stats.num_iters = config.num_iters;
for (auto& t : callers) {
t.join();
}
return stats;
}
} // namespace torch::throughput_benchmark::detail
```
|
=============================================================================================================================================
SOURCE CODE FILE: throughput_benchmark.h
LINES: 1
SIZE: 6.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\throughput_benchmark.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/ivalue.h>
#include <pybind11/pybind11.h>
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/jit/python/pybind_utils.h>
#include <iosfwd>
#include <memory>
#include <string>
#include <vector>
namespace py = pybind11;
namespace torch::throughput_benchmark {
/**
 * This struct is used to provide the results of a benchmark to the caller.
 * In the future all additional statistics should be added here.
*/
struct BenchmarkExecutionStats {
float latency_avg_ms{-1};
int64_t num_iters{-1};
};
std::ostream& operator<<(
std::ostream& os,
const BenchmarkExecutionStats& value);
/**
* Use this struct in order to configure a throughput benchmark run.
* This struct should include parameters related to threading, batching, number
* of iterations, warm-up, etc. More configs can be added as needed.
 * The general rule here is that only things that C++ must(!) be aware of should
 * be here. If we can keep other parts in Python, we should keep them there.
 * This is typical for things that are not perf critical and don't affect the
 * execution statistics the benchmark returns.
*/
struct BenchmarkConfig {
public:
// Calling threads are those threads that are calling into a module in
// parallel.
int num_calling_threads{1};
// Worker threads are not supported yet. This is just a placeholder indicating
// that we plan to support some form of multi-threaded forward calls. We may
// change this setting in the future to support different intra- and inter-op
// parallelism, which is not available in PyTorch yet.
int num_worker_threads{1};
// Warmup iters are used to make sure we run a module a few times before
// actually measuring things. This way we avoid cold caches and any other
// similar problems
int num_warmup_iters{1};
// Number of iterations the benchmark should run with. This number is separate
// from the warmup iterations
int64_t num_iters{100};
// If set autograd profiler will be enabled. I.e. this variable would be
// created before the main benchmark loop (but after the warmup):
// RecordProfile guard(profiler_output_path);
std::string profiler_output_path;
};
namespace detail {
/**
* A helper class to abstract out different models we test throughput of
*/
template <class Input, class Output, class Model>
class BenchmarkHelper {
public:
BenchmarkHelper();
explicit BenchmarkHelper(Model model)
: model_(std::move(model)), initialized_(true) {}
  // This method is to be used in the benchmark() method.
  // Note that there is no result. This way we don't have to call this under the
  // GIL even when running in nn.Module mode. Otherwise, the destructor of the
  // result would race with Python.
void runOnce(Input&&) const;
// This method is to be used when calling from Python directly
Output runOnce(const py::args&, const py::kwargs&) const;
  // Aggregate the input in the format the Model expects in order to avoid
  // further conversions at benchmark time.
void addInput(py::args&&, py::kwargs&&);
void addInput(Input&&);
BenchmarkExecutionStats benchmark(const BenchmarkConfig& config) const;
bool initialized() const {
return initialized_;
}
  // The destructor doesn't require the GIL because it is going to be executed
  // on the Python thread.
std::vector<Input> inputs_;
Model model_;
bool initialized_{false};
};
struct C10_HIDDEN ModuleInput {
ModuleInput(ModuleInput&& other) = default;
ModuleInput(const ModuleInput&) = delete;
ModuleInput& operator=(ModuleInput& other) = delete;
ModuleInput& operator=(ModuleInput&& other) = delete;
~ModuleInput() = default;
ModuleInput(py::args&& args, py::kwargs&& kwargs)
: args(std::move(args)), kwargs(std::move(kwargs)) {}
py::args args;
py::kwargs kwargs;
};
typedef py::object ModuleOutput;
typedef std::vector<at::IValue> ScriptModuleInput;
typedef at::IValue ScriptModuleOutput;
template <class Input>
Input cloneInput(const Input& input);
typedef BenchmarkHelper<ScriptModuleInput, at::IValue, jit::Module>
ScriptModuleBenchmark;
template <>
inline BenchmarkHelper<ScriptModuleInput, at::IValue, jit::Module>::
BenchmarkHelper()
: model_("Module", std::make_shared<jit::CompilationUnit>()),
initialized_(false) {}
typedef BenchmarkHelper<ModuleInput, py::object, py::object> ModuleBenchmark;
template <>
inline BenchmarkHelper<ModuleInput, py::object, py::object>::BenchmarkHelper()
: initialized_(false) {}
template <>
void ScriptModuleBenchmark::runOnce(ScriptModuleInput&& input) const;
template <>
ScriptModuleOutput ScriptModuleBenchmark::runOnce(
const py::args& args,
const py::kwargs& kwargs) const;
template <>
void ModuleBenchmark::runOnce(ModuleInput&& input) const;
template <>
ModuleOutput ModuleBenchmark::runOnce(
const py::args& args,
const py::kwargs& kwargs) const;
template <>
void ScriptModuleBenchmark::addInput(py::args&& args, py::kwargs&& kwargs);
template <>
void ScriptModuleBenchmark::addInput(ScriptModuleInput&& input);
template <>
void ModuleBenchmark::addInput(py::args&& args, py::kwargs&& kwargs);
} // namespace detail
/**
* This class is a small c++ component responsible for executing a PyTorch
* module under an inference server like load. It can emulate multiple calling
* threads to a single module provided. In the future we plan to enhance this
* component to support inter and intra-op parallelism as well as multiple
* models running in a single process.
*
* For current available configurations refer to the BenchmarkConfig
* documentation
*
* The class supports working with either nn.Module or ScriptModule.
* Under the hood it just dispatches to corresponding specialization of
* class BenchmarkHelper<Input, Output, Model>
*/
class C10_HIDDEN ThroughputBenchmark {
public:
explicit ThroughputBenchmark(const jit::Module& module);
explicit ThroughputBenchmark(py::object module);
  // Add one more input example. This input example should be in the exact
  // format the module under test expects. It is the responsibility of the
  // module to perform any such format checks; the benchmark doesn't perform any
  // validation of its own.
void addInput(py::args args, py::kwargs kwargs);
// Equivalent to just running the model directly on the given input
py::object runOnce(const py::args& args, const py::kwargs& kwargs);
  // The main method of the class: it performs a multi-threaded benchmark and
  // returns a BenchmarkExecutionStats object with a number of useful statistics
  // about runtime execution. We can enhance this class in the future to provide
  // more information to the user.
BenchmarkExecutionStats benchmark(const BenchmarkConfig& config) const;
private:
detail::ScriptModuleBenchmark script_module_;
detail::ModuleBenchmark module_;
};
} // namespace torch::throughput_benchmark
#include <torch/csrc/utils/throughput_benchmark-inl.h>
```
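For orientation, below is a minimal usage sketch of the API declared above. It assumes an already-scripted `jit::Module` and an initialized Python interpreter holding the GIL (inputs are passed as `py::args`/`py::kwargs`); the driver function name is a placeholder, not part of the header.

```cpp
#include <torch/csrc/utils/throughput_benchmark.h>
#include <iostream>

// Hypothetical driver: benchmark a scripted module with one recorded input.
void run_throughput_sketch(
    const torch::jit::Module& module,
    const pybind11::args& args,
    const pybind11::kwargs& kwargs) {
  namespace tb = torch::throughput_benchmark;
  tb::ThroughputBenchmark bench(module);
  bench.addInput(args, kwargs); // exact format the module expects

  tb::BenchmarkConfig config;
  config.num_calling_threads = 4; // emulate four concurrent callers
  config.num_warmup_iters = 10;
  config.num_iters = 1000;

  tb::BenchmarkExecutionStats stats = bench.benchmark(config);
  std::cout << "avg latency (ms): " << stats.latency_avg_ms
            << ", iters: " << stats.num_iters << '\n';
}
```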
|
============================================================================================================================================
SOURCE CODE FILE: torch_dispatch_mode.h
LINES: 1
SIZE: 2.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\torch_dispatch_mode.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/impl/TorchDispatchModeTLS.h>
namespace torch::torch_dispatch_mode {
struct StashTorchDispatchModeGuard {
public:
StashTorchDispatchModeGuard() {
if (c10::impl::TorchDispatchModeTLS::any_modes_set(
/*skip_infra_modes=*/true)) {
saved_mode_ = c10::impl::TorchDispatchModeTLS::pop_stack();
} else {
auto mode_and_key =
c10::impl::TorchDispatchModeTLS::pop_highest_infra_mode();
saved_mode_ = std::move(std::get<0>(mode_and_key));
saved_mode_key_ = std::get<1>(mode_and_key);
}
}
~StashTorchDispatchModeGuard() {
if (saved_mode_key_.has_value()) {
c10::impl::TorchDispatchModeTLS::set_mode(
saved_mode_, saved_mode_key_.value());
} else {
c10::impl::TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
std::move(saved_mode_));
}
}
StashTorchDispatchModeGuard(const StashTorchDispatchModeGuard&) = delete;
StashTorchDispatchModeGuard(StashTorchDispatchModeGuard&&) = delete;
StashTorchDispatchModeGuard& operator=(const StashTorchDispatchModeGuard&) =
delete;
StashTorchDispatchModeGuard& operator=(StashTorchDispatchModeGuard&&) =
delete;
const std::shared_ptr<c10::impl::PyObject_TorchDispatchMode>& get_cur_mode() {
return saved_mode_;
}
private:
std::shared_ptr<c10::impl::PyObject_TorchDispatchMode> saved_mode_;
std::optional<c10::impl::TorchDispatchModeKey> saved_mode_key_;
};
struct StashTorchDispatchStackGuard {
public:
StashTorchDispatchStackGuard() {
auto old = c10::impl::TorchDispatchModeTLS::get_state();
c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));
saved_state_ = std::move(old);
}
StashTorchDispatchStackGuard(const StashTorchDispatchStackGuard&) = delete;
StashTorchDispatchStackGuard(StashTorchDispatchStackGuard&&) = delete;
StashTorchDispatchStackGuard& operator=(const StashTorchDispatchStackGuard&) =
delete;
StashTorchDispatchStackGuard& operator=(StashTorchDispatchStackGuard&&) =
delete;
~StashTorchDispatchStackGuard() {
c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));
}
private:
c10::impl::TorchDispatchModeTLS saved_state_;
};
} // namespace torch::torch_dispatch_mode
```
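A brief sketch of how the stash guard above might be used: run a piece of work with the innermost (non-infra) TorchDispatchMode temporarily popped off the thread-local stack, restoring it on scope exit. The helper name is illustrative and assumes at least one dispatch mode is currently active.

```cpp
#include <torch/csrc/utils/torch_dispatch_mode.h>
#include <functional>

// Hypothetical helper: run `work` with the current mode stashed away.
// Assumes at least one dispatch mode (user or infra) is set on this thread.
inline void run_with_mode_stashed(const std::function<void()>& work) {
  torch::torch_dispatch_mode::StashTorchDispatchModeGuard guard;
  // While stashed, the popped mode is still reachable for inspection.
  const auto& stashed_mode = guard.get_cur_mode();
  (void)stashed_mode;
  work(); // executes with the mode removed from the dispatch-mode TLS stack
} // destructor re-registers the stashed mode
```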
|
=================================================================================================================================
SOURCE CODE FILE: variadic.h
LINES: 1
SIZE: 3.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\variadic.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/core/Variadic.h>
#include <torch/csrc/autograd/variable.h>
#include <type_traits>
#include <utility>
namespace torch {
using at::IterArgs;
struct CountTensors : IterArgs<CountTensors> {
size_t out = 0;
void operator()(const at::Tensor& x) {
out += 1;
}
void operator()(const std::optional<at::Tensor>& x) {
out += x.has_value();
}
void operator()(at::ArrayRef<at::Tensor> xs) {
out += xs.size();
}
};
template <typename... Args>
size_t count_tensors(Args&&... args) {
return CountTensors().apply(std::forward<Args>(args)...).out;
}
struct CountVariables : IterArgs<CountVariables> {
size_t out = 0;
void operator()(const autograd::Variable& x) {
out += 1;
}
void operator()(at::ArrayRef<autograd::Variable> xs) {
out += xs.size();
}
};
template <typename... Args>
inline size_t count_variables(Args&&... args) {
return CountVariables().apply(std::forward<Args>(args)...).out;
}
//===----------------------------------------------------------------------===//
// std::index_sequence shim for C++11
//===----------------------------------------------------------------------===//
// A container of type-template parameter indices.
template <size_t... Is>
struct Indices {};
// Decrements the index N, adds N-1 to the list of indices and forwards
// whatever we already have.
template <size_t N, size_t... Is>
struct MakeIndices : MakeIndices<N - 1, N - 1, Is...> {};
// Partial specialization that forms our base case. When N is zero, we stop
// and define a typedef that will be visible to earlier classes due to
// inheritance. The typedef we define is an index list containing the numbers
// 0 through N-1.
template <size_t... Is>
struct MakeIndices<0, Is...> {
using indices = Indices<Is...>;
};
//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//
template <typename Function, typename... Ts>
void apply(Function function, Ts&&... ts) {
// https://stackoverflow.com/questions/13978916/inserting-a-variadic-argument-list-into-a-vector
// Creates a dummy array, so that each function call is evaluated in order.
// `(function(), 0)` is because `function` should (!) return `void`, so
// according to the comma operator, it is evaluated and its result (`void`)
// is discarded. Then the zero is evaluated and used as an element in the
// array. The first zero ensures the array is not empty.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
int _[]{0, (function(std::forward<Ts>(ts)), 0)...};
(void)_;
}
template <
typename ReturnType,
typename... Ts,
typename Function,
typename Accessor>
ReturnType unpack(Function function, Accessor accessor) {
return ReturnType(unpack<ReturnType, Ts...>(
std::move(function),
std::move(accessor),
typename MakeIndices<sizeof...(Ts)>::indices()));
}
template <
typename ReturnType,
typename... Ts,
typename Function,
typename Accessor,
size_t... Is>
ReturnType unpack(Function function, Accessor accessor, Indices<Is...>) {
return ReturnType(function(accessor.template operator()<Ts>(Is)...));
}
} // namespace torch
```
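A small sketch exercising the helpers above: `count_tensors()` counts Tensor-like arguments across a mixed parameter pack, and `apply()` invokes a void-returning callable once per argument, left to right. The function name is illustrative only.

```cpp
#include <torch/csrc/utils/variadic.h>
#include <iostream>
#include <optional>

// Hypothetical demo of CountTensors and apply().
inline void variadic_sketch(const at::Tensor& a, const at::Tensor& b) {
  // Tensor, optional<Tensor> and ArrayRef<Tensor> are all handled; an empty
  // optional contributes zero to the count.
  size_t n = torch::count_tensors(a, b, std::optional<at::Tensor>{});
  std::cout << "tensor args: " << n << '\n'; // prints 2

  // apply() evaluates the callable once per argument, in order.
  torch::apply(
      [](const at::Tensor& t) { std::cout << t.dim() << ' '; }, a, b);
  std::cout << '\n';
}
```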
|
================================================================================================================================
SOURCE CODE FILE: verbose.h
LINES: 1
SIZE: 0.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\utils\verbose.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
namespace torch {
void initVerboseBindings(PyObject* module);
} // namespace torch
```
|
============================================================================================================================
SOURCE CODE FILE: Event.h
LINES: 1
SIZE: 0.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\xpu\Event.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/xpu/XPUEvent.h>
#include <torch/csrc/Event.h>
#include <torch/csrc/python_headers.h>
struct THXPEvent : THPEvent {
at::xpu::XPUEvent xpu_event;
};
extern PyObject* THXPEventClass;
void THXPEvent_init(PyObject* module);
inline bool THXPEvent_Check(PyObject* obj) {
return THXPEventClass && PyObject_IsInstance(obj, THXPEventClass);
}
```
|
=============================================================================================================================
SOURCE CODE FILE: Module.h
LINES: 1
SIZE: 0.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\xpu\Module.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
PyMethodDef* THXPModule_methods();
namespace torch::xpu {
void initModule(PyObject* module);
} // namespace torch::xpu
```
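As a rough sketch (not the actual wiring in torch's Python initialization code), the method table and init hook above would be attached to a CPython module object roughly like this; the helper name is hypothetical.

```cpp
#include <torch/csrc/xpu/Module.h>
#include <stdexcept>

// Hypothetical glue: add the XPU method table and bindings to a module.
inline void attach_xpu_bindings(PyObject* module) {
  if (PyModule_AddFunctions(module, THXPModule_methods()) < 0) {
    throw std::runtime_error("failed to register XPU module methods");
  }
  torch::xpu::initModule(module);
}
```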
|
=============================================================================================================================
SOURCE CODE FILE: Stream.h
LINES: 1
SIZE: 0.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\xpu\Stream.h
ENCODING: utf-8
```h
#pragma once
#include <c10/xpu/XPUStream.h>
#include <torch/csrc/Stream.h>
#include <torch/csrc/python_headers.h>
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct THXPStream : THPStream {
at::xpu::XPUStream xpu_stream;
};
extern PyObject* THXPStreamClass;
void THXPStream_init(PyObject* module);
inline bool THXPStream_Check(PyObject* obj) {
return THXPStreamClass && PyObject_IsInstance(obj, THXPStreamClass);
}
```
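A minimal sketch of how a binding might use `THXPStream_Check` to validate an argument coming from Python; the binding function itself is hypothetical and not part of the header.

```cpp
#include <torch/csrc/xpu/Stream.h>

// Hypothetical METH_O binding: accept a torch.xpu.Stream or None.
static PyObject* sketch_use_stream(PyObject* /*self*/, PyObject* obj) {
  if (obj != Py_None && !THXPStream_Check(obj)) {
    PyErr_SetString(PyExc_TypeError, "expected a torch.xpu.Stream or None");
    return nullptr;
  }
  if (THXPStream_Check(obj)) {
    auto* stream_obj = reinterpret_cast<THXPStream*>(obj);
    (void)stream_obj->xpu_stream; // the underlying at::xpu::XPUStream
  }
  Py_RETURN_NONE;
}
```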
|
==========================================================================================================================
SOURCE CODE FILE: custom_class.h
LINES: 1
SIZE: 19.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\custom_class.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/builtin_function.h>
#include <ATen/core/function_schema.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/class_type.h>
#include <ATen/core/op_registration/infer_schema.h>
#include <ATen/core/stack.h>
#include <c10/util/C++17.h>
#include <c10/util/Metaprogramming.h>
#include <c10/util/TypeList.h>
#include <c10/util/TypeTraits.h>
#include <torch/custom_class_detail.h>
#include <torch/library.h>
#include <functional>
#include <sstream>
namespace torch {
/// This function is used in conjunction with `class_::def()` to register
/// a constructor for a given C++ class type. For example,
/// `torch::init<int, std::string>()` would register a two-argument constructor
/// taking an `int` and a `std::string` as arguments.
template <class... Types>
detail::types<void, Types...> init() {
return detail::types<void, Types...>{};
}
template <typename Func, typename... ParameterTypeList>
struct InitLambda {
Func f;
};
template <typename Func>
decltype(auto) init(Func&& f) {
using InitTraits = c10::guts::infer_function_traits_t<std::decay_t<Func>>;
using ParameterTypeList = typename InitTraits::parameter_types;
InitLambda<Func, ParameterTypeList> init{std::forward<Func>(f)};
return init;
}
/// Entry point for custom C++ class registration. To register a C++ class
/// in PyTorch, instantiate `torch::class_` with the desired class as the
/// template parameter. Typically, this instantiation should be done in
/// the initialization of a global variable, so that the class will be
/// made available on dynamic library loading without any additional API
/// calls needed. For example, to register a class named Foo, you might
/// create a global variable like so:
///
/// static auto register_foo = torch::class_<Foo>("myclasses", "Foo")
/// .def("myMethod", &Foo::myMethod)
/// .def("lambdaMethod", [](const c10::intrusive_ptr<Foo>& self) {
/// // Do something with `self`
/// });
///
/// In addition to registering the class, this registration also chains
/// `def()` calls to register methods. `myMethod()` is registered with
/// a pointer to the Foo class's `myMethod()` method. `lambdaMethod()`
/// is registered with a C++ lambda expression.
template <class CurClass>
class class_ : public ::torch::detail::class_base {
static_assert(
std::is_base_of_v<CustomClassHolder, CurClass>,
"torch::class_<T> requires T to inherit from CustomClassHolder");
public:
/// This constructor actually registers the class type.
/// String argument `namespaceName` is an identifier for the
/// namespace you would like this class to appear in.
/// String argument `className` is the name you would like to
/// see this class exposed as in Python and TorchScript. For example, if
/// you pass `foo` as the namespace name and `Bar` as the className, the
/// class will appear as `torch.classes.foo.Bar` in Python and TorchScript
explicit class_(
const std::string& namespaceName,
const std::string& className,
std::string doc_string = "")
: class_base(
namespaceName,
className,
std::move(doc_string),
typeid(c10::intrusive_ptr<CurClass>),
typeid(c10::tagged_capsule<CurClass>)) {}
/// def() can be used in conjunction with `torch::init()` to register
/// a constructor for a given C++ class type. For example, passing
/// `torch::init<int, std::string>()` would register a two-argument
  /// constructor taking an `int` and a `std::string` as arguments.
template <typename... Types>
class_& def(
torch::detail::types<void, Types...>,
std::string doc_string = "",
std::initializer_list<arg> default_args =
{}) { // Used in combination with
// torch::init<...>()
auto func = [](c10::tagged_capsule<CurClass> self, Types... args) {
auto classObj = c10::make_intrusive<CurClass>(args...);
auto object = self.ivalue.toObject();
object->setSlot(0, c10::IValue::make_capsule(std::move(classObj)));
};
defineMethod(
"__init__",
std::move(func),
std::move(doc_string),
default_args);
return *this;
}
// Used in combination with torch::init([]lambda(){......})
template <typename Func, typename... ParameterTypes>
class_& def(
InitLambda<Func, c10::guts::typelist::typelist<ParameterTypes...>> init,
std::string doc_string = "",
std::initializer_list<arg> default_args = {}) {
auto init_lambda_wrapper = [func = std::move(init.f)](
c10::tagged_capsule<CurClass> self,
ParameterTypes... arg) {
c10::intrusive_ptr<CurClass> classObj =
std::invoke(func, std::forward<ParameterTypes>(arg)...);
auto object = self.ivalue.toObject();
object->setSlot(0, c10::IValue::make_capsule(classObj));
};
defineMethod(
"__init__",
std::move(init_lambda_wrapper),
std::move(doc_string),
default_args);
return *this;
}
/// This is the normal method registration API. `name` is the name that
/// the method will be made accessible by in Python and TorchScript.
/// `f` is a callable object that defines the method. Typically `f`
/// will either be a pointer to a method on `CurClass`, or a lambda
/// expression that takes a `c10::intrusive_ptr<CurClass>` as the first
/// argument (emulating a `this` argument in a C++ method.)
///
/// Examples:
///
/// // Exposes method `foo` on C++ class `Foo` as `call_foo()` in
/// // Python and TorchScript
/// .def("call_foo", &Foo::foo)
///
/// // Exposes the given lambda expression as method `call_lambda()`
/// // in Python and TorchScript.
/// .def("call_lambda", [](const c10::intrusive_ptr<Foo>& self) {
/// // do something
/// })
template <typename Func>
class_& def(
std::string name,
Func f,
std::string doc_string = "",
std::initializer_list<arg> default_args = {}) {
auto wrapped_f = detail::wrap_func<CurClass, Func>(std::move(f));
defineMethod(
std::move(name),
std::move(wrapped_f),
std::move(doc_string),
default_args);
return *this;
}
/// Method registration API for static methods.
template <typename Func>
class_& def_static(std::string name, Func func, std::string doc_string = "") {
auto qualMethodName = qualClassName + "." + name;
auto schema =
c10::inferFunctionSchemaSingleReturn<Func>(std::move(name), "");
auto wrapped_func =
[func = std::move(func)](jit::Stack& stack) mutable -> void {
using RetType =
typename c10::guts::infer_function_traits_t<Func>::return_type;
detail::BoxedProxy<RetType, Func>()(stack, func);
};
auto method = std::make_unique<jit::BuiltinOpFunction>(
std::move(qualMethodName),
std::move(schema),
std::move(wrapped_func),
std::move(doc_string));
classTypePtr->addStaticMethod(method.get());
registerCustomClassMethod(std::move(method));
return *this;
}
/// Property registration API for properties with both getter and setter
/// functions.
template <typename GetterFunc, typename SetterFunc>
class_& def_property(
const std::string& name,
GetterFunc getter_func,
SetterFunc setter_func,
std::string doc_string = "") {
torch::jit::Function* getter{};
torch::jit::Function* setter{};
auto wrapped_getter =
detail::wrap_func<CurClass, GetterFunc>(std::move(getter_func));
getter = defineMethod(name + "_getter", wrapped_getter, doc_string);
auto wrapped_setter =
detail::wrap_func<CurClass, SetterFunc>(std::move(setter_func));
setter = defineMethod(name + "_setter", wrapped_setter, doc_string);
classTypePtr->addProperty(name, getter, setter);
return *this;
}
/// Property registration API for properties with only getter function.
template <typename GetterFunc>
class_& def_property(
const std::string& name,
GetterFunc getter_func,
std::string doc_string = "") {
torch::jit::Function* getter{};
auto wrapped_getter =
detail::wrap_func<CurClass, GetterFunc>(std::move(getter_func));
getter = defineMethod(name + "_getter", wrapped_getter, doc_string);
classTypePtr->addProperty(name, getter, nullptr);
return *this;
}
/// Property registration API for properties with read-write access.
template <typename T>
class_& def_readwrite(const std::string& name, T CurClass::*field) {
auto getter_func = [field =
field](const c10::intrusive_ptr<CurClass>& self) {
return self.get()->*field;
};
auto setter_func = [field = field](
const c10::intrusive_ptr<CurClass>& self, T value) {
self.get()->*field = value;
};
return def_property(name, getter_func, setter_func);
}
/// Property registration API for properties with read-only access.
template <typename T>
class_& def_readonly(const std::string& name, T CurClass::*field) {
auto getter_func =
[field = std::move(field)](const c10::intrusive_ptr<CurClass>& self) {
return self.get()->*field;
};
return def_property(name, getter_func);
}
/// This is an unsafe method registration API added for adding custom JIT
/// backend support via custom C++ classes. It is not for general purpose use.
class_& _def_unboxed(
const std::string& name,
std::function<void(jit::Stack&)> func,
c10::FunctionSchema schema,
std::string doc_string = "") {
auto method = std::make_unique<jit::BuiltinOpFunction>(
qualClassName + "." + name,
std::move(schema),
std::move(func),
std::move(doc_string));
classTypePtr->addMethod(method.get());
registerCustomClassMethod(std::move(method));
return *this;
}
/// def_pickle() is used to define exactly what state gets serialized
/// or deserialized for a given instance of a custom C++ class in
/// Python or TorchScript. This protocol is equivalent to the Pickle
/// concept of `__getstate__` and `__setstate__` from Python
/// (https://docs.python.org/2/library/pickle.html#object.__getstate__)
///
/// Currently, both the `get_state` and `set_state` callables must be
/// C++ lambda expressions. They should have the following signatures,
/// where `CurClass` is the class you're registering and `T1` is some object
/// that encapsulates the state of the object.
///
/// __getstate__(intrusive_ptr<CurClass>) -> T1
/// __setstate__(T2) -> intrusive_ptr<CurClass>
///
  /// `T1` must be an object that is convertible to IValue by the same rules
  /// as for custom op/method registration.
///
/// For the common case, T1 == T2. T1 can also be a subtype of T2. An
/// example where it makes sense for T1 and T2 to differ is if __setstate__
/// handles legacy formats in a backwards compatible way.
///
/// Example:
///
/// .def_pickle(
/// // __getstate__
/// [](const c10::intrusive_ptr<MyStackClass<std::string>>& self) {
/// return self->stack_;
/// },
/// [](std::vector<std::string> state) { // __setstate__
/// return c10::make_intrusive<MyStackClass<std::string>>(
/// std::vector<std::string>{"i", "was", "deserialized"});
/// })
template <typename GetStateFn, typename SetStateFn>
class_& def_pickle(GetStateFn&& get_state, SetStateFn&& set_state) {
static_assert(
c10::guts::is_stateless_lambda<std::decay_t<GetStateFn>>::value &&
c10::guts::is_stateless_lambda<std::decay_t<SetStateFn>>::value,
"def_pickle() currently only supports lambdas as "
"__getstate__ and __setstate__ arguments.");
def("__getstate__", std::forward<GetStateFn>(get_state));
// __setstate__ needs to be registered with some custom handling:
// We need to wrap the invocation of the user-provided function
// such that we take the return value (i.e. c10::intrusive_ptr<CurrClass>)
// and assign it to the `capsule` attribute.
using SetStateTraits =
c10::guts::infer_function_traits_t<std::decay_t<SetStateFn>>;
using SetStateArg = typename c10::guts::typelist::head_t<
typename SetStateTraits::parameter_types>;
auto setstate_wrapper = [set_state = std::forward<SetStateFn>(set_state)](
c10::tagged_capsule<CurClass> self,
SetStateArg arg) {
c10::intrusive_ptr<CurClass> classObj =
std::invoke(set_state, std::move(arg));
auto object = self.ivalue.toObject();
object->setSlot(0, c10::IValue::make_capsule(classObj));
};
defineMethod(
"__setstate__",
detail::wrap_func<CurClass, decltype(setstate_wrapper)>(
std::move(setstate_wrapper)));
// type validation
auto getstate_schema = classTypePtr->getMethod("__getstate__").getSchema();
#ifndef STRIP_ERROR_MESSAGES
auto format_getstate_schema = [&getstate_schema]() {
std::stringstream ss;
ss << getstate_schema;
return ss.str();
};
#endif
TORCH_CHECK(
getstate_schema.arguments().size() == 1,
"__getstate__ should take exactly one argument: self. Got: ",
format_getstate_schema());
auto first_arg_type = getstate_schema.arguments().at(0).type();
TORCH_CHECK(
*first_arg_type == *classTypePtr,
"self argument of __getstate__ must be the custom class type. Got ",
first_arg_type->repr_str());
TORCH_CHECK(
getstate_schema.returns().size() == 1,
"__getstate__ should return exactly one value for serialization. Got: ",
format_getstate_schema());
auto ser_type = getstate_schema.returns().at(0).type();
auto setstate_schema = classTypePtr->getMethod("__setstate__").getSchema();
auto arg_type = setstate_schema.arguments().at(1).type();
TORCH_CHECK(
ser_type->isSubtypeOf(*arg_type),
"__getstate__'s return type should be a subtype of "
"input argument of __setstate__. Got ",
ser_type->repr_str(),
" but expected ",
arg_type->repr_str());
return *this;
}
private:
template <typename Func>
torch::jit::Function* defineMethod(
std::string name,
Func func,
std::string doc_string = "",
std::initializer_list<arg> default_args = {}) {
auto qualMethodName = qualClassName + "." + name;
auto schema =
c10::inferFunctionSchemaSingleReturn<Func>(std::move(name), "");
// If default values are provided for function arguments, there must be
// none (no default values) or default values for all function
// arguments, except for self. This is because argument names are not
// extracted by inferFunctionSchemaSingleReturn, and so there must be a
// torch::arg instance in default_args even for arguments that do not
// have an actual default value provided.
TORCH_CHECK(
default_args.size() == 0 ||
default_args.size() == schema.arguments().size() - 1,
"Default values must be specified for none or all arguments");
// If there are default args, copy the argument names and default values to
// the function schema.
if (default_args.size() > 0) {
schema = withNewArguments(schema, default_args);
}
auto wrapped_func =
[func = std::move(func)](jit::Stack& stack) mutable -> void {
// TODO: we need to figure out how to profile calls to custom functions
// like this! Currently can't do it because the profiler stuff is in
// libtorch and not ATen
using RetType =
typename c10::guts::infer_function_traits_t<Func>::return_type;
detail::BoxedProxy<RetType, Func>()(stack, func);
};
auto method = std::make_unique<jit::BuiltinOpFunction>(
qualMethodName,
std::move(schema),
std::move(wrapped_func),
std::move(doc_string));
// Register the method here to keep the Method alive.
    // ClassTypes do not hold ownership of their methods (normally
    // those are held by the CompilationUnit), so we need a proxy for
// that behavior here.
auto method_val = method.get();
classTypePtr->addMethod(method_val);
registerCustomClassMethod(std::move(method));
return method_val;
}
};
/// make_custom_class() is a convenient way to create an instance of a
/// registered custom class and wrap it in an IValue, for example when you want
/// to pass the object to TorchScript. Its syntax is equivalent to APIs like
/// `std::make_shared<>` or `c10::make_intrusive<>`.
///
/// For example, if you have a custom C++ class that can be constructed from an
/// `int` and `std::string`, you might use this API like so:
///
/// IValue custom_class_iv = torch::make_custom_class<MyClass>(3,
/// "foobarbaz");
template <typename CurClass, typename... CtorArgs>
c10::IValue make_custom_class(CtorArgs&&... args) {
auto userClassInstance =
c10::make_intrusive<CurClass>(std::forward<CtorArgs>(args)...);
return c10::IValue(std::move(userClassInstance));
}
// Alternative API for creating a torchbind class over torch::class_. This API
// is preferred to prevent size regressions in Edge use cases. Must be used in
// conjunction with the TORCH_SELECTIVE_CLASS macro, e.g.
// selective_class_<foo>("foo_namespace", TORCH_SELECTIVE_CLASS("foo"))
template <class CurClass>
inline class_<CurClass> selective_class_(
const std::string& namespace_name,
detail::SelectiveStr<true> className) {
auto class_name = std::string(className.operator const char*());
return torch::class_<CurClass>(namespace_name, class_name);
}
template <class CurClass>
inline detail::ClassNotSelected selective_class_(
const std::string&,
detail::SelectiveStr<false>) {
return detail::ClassNotSelected();
}
// jit namespace for backward-compatibility
// We previously defined everything in torch::jit but moved it out to
// better reflect that these features are not limited only to TorchScript
namespace jit {
using ::torch::class_;
using ::torch::getCustomClass;
using ::torch::init;
using ::torch::isCustomClass;
} // namespace jit
template <class CurClass>
inline class_<CurClass> Library::class_(const std::string& className) {
TORCH_CHECK(
kind_ == DEF || kind_ == FRAGMENT,
"class_(\"",
className,
"\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. "
"All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. "
"(Error occurred at ",
file_,
":",
line_,
")");
TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_);
return torch::class_<CurClass>(*ns_, className);
}
const std::unordered_set<std::string> getAllCustomClassesNames();
template <class CurClass>
inline class_<CurClass> Library::class_(detail::SelectiveStr<true> className) {
auto class_name = std::string(className.operator const char*());
TORCH_CHECK(
kind_ == DEF || kind_ == FRAGMENT,
"class_(\"",
class_name,
"\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. "
"All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. "
"(Error occurred at ",
file_,
":",
line_,
")");
TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_);
return torch::class_<CurClass>(*ns_, class_name);
}
template <class CurClass>
inline detail::ClassNotSelected Library::class_(detail::SelectiveStr<false>) {
return detail::ClassNotSelected();
}
} // namespace torch
```
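Pulling the pieces above together, here is a hedged end-to-end sketch of registering a custom class. The `Counter` type below is made up purely for illustration, but the registration calls mirror the documented API (`torch::init`, `def`, `def_readwrite`, `def_pickle`, and `make_custom_class`).

```cpp
#include <torch/custom_class.h>

// Hypothetical custom class used only for this sketch.
struct Counter : torch::CustomClassHolder {
  explicit Counter(int64_t start) : value(start) {}
  int64_t increment(int64_t by) {
    value += by;
    return value;
  }
  int64_t value;
};

// Registration typically happens at static-initialization time.
static auto counter_registration =
    torch::class_<Counter>("mysketch", "Counter")
        .def(torch::init<int64_t>())
        .def("increment", &Counter::increment)
        .def_readwrite("value", &Counter::value)
        .def_pickle(
            // __getstate__: return the serializable state
            [](const c10::intrusive_ptr<Counter>& self) { return self->value; },
            // __setstate__: rebuild the object from that state
            [](int64_t state) { return c10::make_intrusive<Counter>(state); });

// Create an instance wrapped in an IValue, e.g. to hand to TorchScript.
inline c10::IValue make_counter_ivalue() {
  return torch::make_custom_class<Counter>(static_cast<int64_t>(0));
}
```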
|
=================================================================================================================================
SOURCE CODE FILE: custom_class_detail.h
LINES: 1
SIZE: 7.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\custom_class_detail.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h>
#include <ATen/core/function.h>
#include <c10/util/Metaprogramming.h>
#include <c10/util/TypeTraits.h>
#include <c10/util/irange.h>
#include <functional>
namespace torch {
namespace detail {
/**
* In the Facebook internal build (using BUCK), this macro is enabled by
* passing in -c pt.enable_record_kernel_dtype=1 when building the tracer
* binary.
*/
#if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE
TORCH_API void record_custom_class(std::string name);
/**
 * Record an instance of a custom class being loaded.
 * Grabs the portion of the string after the final '.' in the qualified name,
 * as this aligns with how users name their custom classes.
 * Example: __torch__.torch.classes.xnnpack.Conv2dOpContext
*/
#define RECORD_CUSTOM_CLASS(NAME) \
auto name = std::string(NAME); \
detail::record_custom_class(name.substr(name.find_last_of(".") + 1));
#else
#define RECORD_CUSTOM_CLASS(NAME)
#endif
} // namespace detail
/// This struct is used to represent default values for arguments
/// when registering methods for custom classes.
/// static auto register_foo = torch::class_<Foo>("myclasses", "Foo")
/// .def("myMethod", &Foo::myMethod, {torch::arg("name") = name});
struct arg {
// Static method for representing a default value of None. This is meant to
// be used like so:
// torch::arg("name") = torch::arg::none
// and is identical to:
// torch::arg("name") = IValue()
static c10::IValue none() {
return c10::IValue();
}
// Explicit constructor.
explicit arg(std::string name)
: name_(std::move(name)), value_(std::nullopt) {}
// Assignment operator. This enables the pybind-like syntax of
// torch::arg("name") = value.
arg& operator=(const c10::IValue& rhs) {
value_ = rhs;
return *this;
}
// The name of the argument. This is copied to the schema; argument
// names cannot be extracted from the C++ declaration.
std::string name_;
// IValue's default constructor makes it None, which is not distinguishable
// from an actual, user-provided default value that is None. This boolean
// helps distinguish between the two cases.
std::optional<c10::IValue> value_;
};
namespace detail {
// Argument type utilities
template <class R, class...>
struct types {
using type = types;
};
template <typename Method>
struct WrapMethod;
template <typename R, typename CurrClass, typename... Args>
struct WrapMethod<R (CurrClass::*)(Args...)> {
WrapMethod(R (CurrClass::*m)(Args...)) : m(std::move(m)) {}
R operator()(c10::intrusive_ptr<CurrClass> cur, Args... args) {
return std::invoke(m, *cur, args...);
}
R (CurrClass::*m)(Args...);
};
template <typename R, typename CurrClass, typename... Args>
struct WrapMethod<R (CurrClass::*)(Args...) const> {
WrapMethod(R (CurrClass::*m)(Args...) const) : m(std::move(m)) {}
R operator()(c10::intrusive_ptr<CurrClass> cur, Args... args) {
return std::invoke(m, *cur, args...);
}
R (CurrClass::*m)(Args...) const;
};
// Adapter for different callable types
template <
typename CurClass,
typename Func,
std::enable_if_t<
std::is_member_function_pointer_v<std::decay_t<Func>>,
bool> = false>
WrapMethod<Func> wrap_func(Func f) {
return WrapMethod<Func>(std::move(f));
}
template <
typename CurClass,
typename Func,
std::enable_if_t<
!std::is_member_function_pointer_v<std::decay_t<Func>>,
bool> = false>
Func wrap_func(Func f) {
return f;
}
template <
class Functor,
bool AllowDeprecatedTypes,
size_t... ivalue_arg_indices>
typename c10::guts::infer_function_traits_t<Functor>::return_type
call_torchbind_method_from_stack(
Functor& functor,
jit::Stack& stack,
std::index_sequence<ivalue_arg_indices...>) {
(void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would
// be unused and we have to silence the compiler warning.
constexpr size_t num_ivalue_args = sizeof...(ivalue_arg_indices);
using IValueArgTypes =
typename c10::guts::infer_function_traits_t<Functor>::parameter_types;
// TODO We shouldn't use c10::impl stuff directly here. We should use the
// KernelFunction API instead.
return (functor)(c10::impl::ivalue_to_arg<
typename c10::impl::decay_if_not_tensor<
c10::guts::typelist::
element_t<ivalue_arg_indices, IValueArgTypes>>::type,
AllowDeprecatedTypes>::
call(torch::jit::peek(
stack, ivalue_arg_indices, num_ivalue_args))...);
}
template <class Functor, bool AllowDeprecatedTypes>
typename c10::guts::infer_function_traits_t<Functor>::return_type
call_torchbind_method_from_stack(Functor& functor, jit::Stack& stack) {
constexpr size_t num_ivalue_args =
c10::guts::infer_function_traits_t<Functor>::number_of_parameters;
return call_torchbind_method_from_stack<Functor, AllowDeprecatedTypes>(
functor, stack, std::make_index_sequence<num_ivalue_args>());
}
template <class RetType, class Func>
struct BoxedProxy;
template <class RetType, class Func>
struct BoxedProxy {
void operator()(jit::Stack& stack, Func& func) {
auto retval = call_torchbind_method_from_stack<Func, false>(func, stack);
constexpr size_t num_ivalue_args =
c10::guts::infer_function_traits_t<Func>::number_of_parameters;
torch::jit::drop(stack, num_ivalue_args);
stack.emplace_back(c10::ivalue::from(std::move(retval)));
}
};
template <class Func>
struct BoxedProxy<void, Func> {
void operator()(jit::Stack& stack, Func& func) {
call_torchbind_method_from_stack<Func, false>(func, stack);
constexpr size_t num_ivalue_args =
c10::guts::infer_function_traits_t<Func>::number_of_parameters;
torch::jit::drop(stack, num_ivalue_args);
stack.emplace_back();
}
};
inline bool validIdent(size_t i, char n) {
return isalpha(n) || n == '_' || (i > 0 && isdigit(n));
}
inline void checkValidIdent(const std::string& str, const char* type) {
for (const auto i : c10::irange(str.size())) {
TORCH_CHECK(
validIdent(i, str[i]),
type,
" must be a valid Python/C++ identifier."
" Character '",
str[i],
"' at index ",
i,
" is illegal.");
}
}
class TORCH_API class_base {
protected:
explicit class_base(
const std::string& namespaceName,
const std::string& className,
std::string doc_string,
const std::type_info& intrusivePtrClassTypeid,
const std::type_info& taggedCapsuleClass);
static c10::FunctionSchema withNewArguments(
const c10::FunctionSchema& schema,
std::initializer_list<arg> default_args);
std::string qualClassName;
at::ClassTypePtr classTypePtr;
};
} // namespace detail
TORCH_API void registerCustomClass(at::ClassTypePtr class_type);
TORCH_API void registerCustomClassMethod(std::unique_ptr<jit::Function> method);
// Given a qualified name (e.g. __torch__.torch.classes.Foo), return
// the ClassType pointer to the Type that describes that custom class,
// or nullptr if no class by that name was found.
TORCH_API at::ClassTypePtr getCustomClass(const std::string& name);
// Given an IValue, return true if the object contained in that IValue
// is a custom C++ class, otherwise return false.
// NOLINTNEXTLINE(readability-redundant-declaration)
TORCH_API bool isCustomClass(const c10::IValue& v);
// This API is for testing purposes ONLY. It should not be used in
// any load-bearing code.
TORCH_API std::vector<c10::FunctionSchema> customClassSchemasForBCCheck();
namespace jit {
using ::torch::registerCustomClass;
using ::torch::registerCustomClassMethod;
} // namespace jit
} // namespace torch
```
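As a small illustration of the `torch::arg` default-value machinery described above (class and method names are hypothetical): when default arguments are supplied to `def()`, one `torch::arg` entry is required for every argument except `self`, in order, because argument names cannot be inferred from the C++ signature.

```cpp
#include <torch/custom_class.h>

// Hypothetical class for this sketch.
struct Accumulator : torch::CustomClassHolder {
  int64_t total{0};
  int64_t add(int64_t x, int64_t times) {
    total += x * times;
    return total;
  }
};

static auto accumulator_registration =
    torch::class_<Accumulator>("mysketch", "Accumulator")
        .def(torch::init<>())
        // Names and defaults are attached via torch::arg; `times` defaults
        // to 1 while `x` stays required (no value assigned to it).
        .def(
            "add",
            &Accumulator::add,
            /*doc_string=*/"",
            {torch::arg("x"), torch::arg("times") = 1});
```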
|
=======================================================================================================================
SOURCE CODE FILE: extension.h
LINES: 1
SIZE: 0.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\extension.h
ENCODING: utf-8
```h
#pragma once
#ifndef TORCH_INDUCTOR_CPP_WRAPPER
// All pure C++ headers for the C++ frontend.
#include <torch/all.h>
#endif
// Python bindings for the C++ frontend (includes Python.h).
#include <torch/python.h>
```
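For context, a typical C++ extension translation unit built on this header looks roughly like the following; `TORCH_EXTENSION_NAME` is normally defined by torch's C++ extension build tooling, and the exported function here is just a placeholder.

```cpp
#include <torch/extension.h>

// Placeholder operator exported by the extension.
torch::Tensor add_one(const torch::Tensor& input) {
  return input + 1;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("add_one", &add_one, "Add one to every element of the input tensor");
}
```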
|
=====================================================================================================================
SOURCE CODE FILE: library.h
LINES: 1
SIZE: 40.72 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\library.h
ENCODING: utf-8
```h
#pragma once
/// \file
///
/// This header provides an API for extending PyTorch's core library
/// of operators with user defined operators and data types. This
/// API can be used in a few ways:
///
/// * You can define new custom operators and classes with TORCH_LIBRARY(),
/// making them available for use in both eager Python as well as in
/// TorchScript. This API is modeled off of pybind11's `PYBIND11_MODULE`
/// macro, as the provided functionality is similar (pybind11 lets you bind
/// C++ to Python only; `torch/library.h` lets you bind C++ simultaneously to
/// Python and TorchScript).
///
/// * You can override existing operators with TORCH_LIBRARY_IMPL(),
/// providing a new implementation for these operators for a custom
/// backend (e.g., XLA). When you pass operators with tensors of your custom
/// backend, your overridden implementations will be called instead
/// of the standard implementations.
///
/// * You can use both capabilities at the same time, allowing you
/// to write custom operators that register CPU/CUDA/Autograd
/// implementations without having to write the boilerplate
/// conditionals yourself.
///
/// For a tutorial style introduction to the library API, check
/// out the [Extending TorchScript with Custom C++
/// Operators](https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html)
/// tutorial.
///
/// ```
/// // Define a library whose operators live in the namespace 'myops'.
/// // You must define all of the operators for this library in
/// // this namespace.
/// TORCH_LIBRARY(myops, m) {
/// // Define an operator with exactly one implementation for all backends.
/// m.def("add(Tensor self, Tensor other) -> Tensor", &add_impl);
///
/// // Define a schema for an operator, but provide no implementation
/// // (use this syntax if you want to use the dispatcher)
/// m.def("mul(Tensor self, Tensor other) -> Tensor");
///
/// // Provide an implementation for a defined operator (you can
/// // provide multiple; one per backend). The dispatcher takes care of
/// // calling the correct implementation depending on if we get a CPU
/// // tensor or a CUDA tensor
/// m.impl("mul", torch::kCPU, &mul_cpu_impl);
/// m.impl("mul", torch::kCUDA, &mul_cuda_impl);
/// }
///
/// // Define implementations for operators for a non-standard backend,
/// // e.g., XLA (valid values are entries of DispatchKey). This can
/// // be used to define operators in a different file than the initial
/// // TORCH_LIBRARY definition (e.g., if it is in an external library)
/// TORCH_LIBRARY_IMPL(myops, XLA, m) {
/// m.impl("mul", &mul_xla_impl);
/// }
/// ```
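/// For reference, the implementations referenced in the example above would be
/// ordinary C++ functions with signatures along these lines (hypothetical
/// names, shown here only for illustration):
///
///   at::Tensor add_impl(const at::Tensor& self, const at::Tensor& other);
///   at::Tensor mul_cpu_impl(const at::Tensor& self, const at::Tensor& other);
///   at::Tensor mul_cuda_impl(const at::Tensor& self, const at::Tensor& other);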
#include <ATen/core/op_registration/infer_schema.h>
#include <ATen/core/op_registration/op_allowlist.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <c10/core/DispatchKey.h>
#include <torch/csrc/jit/frontend/function_schema_parser.h>
// Just for inferFunctionSchemaFromFunctor
#include <ATen/core/enum_tag.h>
#include <ATen/core/op_registration/op_registration.h>
namespace torch {
#if defined C10_MOBILE
/**
* The NoInferSchemaTag is a type name used to indicate that this call to the
* CppFunction constructor should not trigger schema inference from functor.
* Schema inference from functor utilizes template meta-programming, and is
* costly from a size perspective. Ideally, one would expect that the schema
* inference would require very little binary size since most of the
* computation can be done by the compiler at build time, but that isn't
* necessarily the case.
*
* Schema inference is elided only for mobile use-cases where we don't need
* the additional runtime cost or size overhead on client devices.
*
*/
struct NoInferSchemaTag {};
#endif
#define HAS_PT2_COMPLIANT_TAG
// For multipy/torchdeploy use case
enum class _RegisterOrVerify { REGISTER, VERIFY };
template <class CurClass>
class class_;
#define HAS_IMPL_ABSTRACT_PYSTUB
/// Represents a C++ function that implements an operator. Most users won't
/// interact directly with this class, except via error messages: the
/// constructors of this class define the set of permissible "function"-like
/// things you can bind via the interface.
///
/// This class erases the type of the passed in function, but durably records
/// the type via an inferred schema for the function.
class TORCH_API CppFunction final {
// TODO: This is morally the same thing as KernelRegistrationConfig, but it's
// opaque to the user.
public:
/// This overload accepts function pointers, e.g., `CppFunction(&add_impl)`
template <typename Func>
explicit CppFunction(
Func* f,
std::enable_if_t<
c10::guts::is_function_type<Func>::value,
std::nullptr_t> = nullptr)
: func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)),
cpp_signature_(c10::impl::CppSignature::make<Func>()),
schema_(
c10::detail::inferFunctionSchemaFromFunctor<std::decay_t<Func>>())
{}
/// This overload accepts compile time function pointers, e.g.,
/// `CppFunction(TORCH_FN(add_impl))`
template <typename FuncPtr>
explicit CppFunction(
FuncPtr f,
std::enable_if_t<
c10::is_compile_time_function_pointer<FuncPtr>::value,
std::nullptr_t> = nullptr)
: func_(c10::KernelFunction::makeFromUnboxedFunction(f)),
cpp_signature_(
c10::impl::CppSignature::make<typename FuncPtr::FuncType>()),
schema_(c10::detail::inferFunctionSchemaFromFunctor<
typename FuncPtr::FuncType>())
{}
/// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) {
/// ... })`
template <typename Lambda>
explicit CppFunction(
Lambda&& f,
std::enable_if_t<
c10::guts::is_functor<std::decay_t<Lambda>>::value,
std::nullptr_t> = nullptr)
: func_(c10::KernelFunction::makeFromUnboxedLambda(
std::forward<Lambda>(f))),
cpp_signature_(c10::impl::CppSignature::make<Lambda>()),
schema_(c10::detail::inferFunctionSchemaFromFunctor<
std::decay_t<Lambda>>())
{}
#if defined C10_MOBILE
/// This overload accepts function pointers, e.g., `CppFunction(&add_impl,
/// NoInferSchemaTag())`
template <typename Func>
explicit CppFunction(
Func* f,
NoInferSchemaTag,
std::enable_if_t<
c10::guts::is_function_type<Func>::value,
std::nullptr_t> = nullptr)
: func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)),
cpp_signature_(c10::impl::CppSignature::make<Func>())
// TODO: Don't go through WrapRuntimeKernelFunctor
,
schema_(nullptr),
debug_() {}
/// This overload accepts compile time function pointers, e.g.,
/// `CppFunction(TORCH_FN(add_impl), NoInferSchemaTag())`
template <typename FuncPtr>
explicit CppFunction(
FuncPtr f,
NoInferSchemaTag,
std::enable_if_t<
c10::is_compile_time_function_pointer<FuncPtr>::value,
std::nullptr_t> = nullptr)
: func_(c10::KernelFunction::makeFromUnboxedFunction(f)),
cpp_signature_(
c10::impl::CppSignature::make<typename FuncPtr::FuncType>())
// TODO: Don't go through WrapRuntimeKernelFunctor
,
schema_(nullptr),
debug_() {}
/// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) {
  /// ... }, NoInferSchemaTag())`
template <typename Lambda>
explicit CppFunction(
Lambda&& f,
NoInferSchemaTag,
std::enable_if_t<
c10::guts::is_functor<std::decay_t<Lambda>>::value,
std::nullptr_t> = nullptr)
: func_(c10::KernelFunction::makeFromUnboxedLambda(
std::forward<Lambda>(f))),
cpp_signature_(c10::impl::CppSignature::make<Lambda>())
// TODO: Don't go through WrapRuntimeKernelFunctor
,
schema_(nullptr),
debug_() {}
#endif
~CppFunction();
CppFunction(const CppFunction&) = delete;
CppFunction& operator=(const CppFunction&) = delete;
CppFunction(CppFunction&&) noexcept = default;
CppFunction& operator=(CppFunction&&) = default;
/// \private
/// Creates a function from a type-erased boxed kernel.
static CppFunction makeFromBoxedKernel(c10::BoxedKernel kernel) {
return CppFunction(
c10::KernelFunction::makeFromBoxedKernel(std::move(kernel)),
/* cpp_signature */ std::nullopt, // not known for boxed functions
/* schema */ nullptr);
}
/// This creates a fallthrough function. Fallthrough functions
/// immediately redispatch to the next available dispatch key,
/// but are implemented more efficiently than a hand written
/// function done in the same way.
static CppFunction makeFallthrough() {
return makeFromBoxedKernel(c10::BoxedKernel::makeFallthrough());
}
/// \private
///
/// Creates a function that raises an error saying that named tensors
/// are not supported when called.
static CppFunction makeNamedNotSupported() {
return makeFromBoxedKernel(c10::BoxedKernel::makeNamedNotSupported());
}
/// Create a function from a boxed kernel function with signature
/// `void(const OperatorHandle&, Stack*)`; i.e., they receive a
/// stack of arguments in a boxed calling convention, rather than
/// in the native C++ calling convention. Boxed functions are
/// typically only used to register backend fallbacks via
/// torch::Library::fallback().
template <c10::BoxedKernel::BoxedKernelFunction* func>
static CppFunction makeFromBoxedFunction() {
return makeFromBoxedKernel(c10::BoxedKernel::makeFromFunction<func>());
}
// Variant that takes in a boxed kernel function with a plumbed
// DispatchKeySet. See Note [Plumbing Keys Through The Dispatcher] for
// details.
template <c10::BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
static CppFunction makeFromBoxedFunction() {
return makeFromBoxedKernel(c10::BoxedKernel::makeFromFunction<func>());
}
/// Create a function from a boxed kernel functor which defines
/// `operator()(const OperatorHandle&, DispatchKeySet, Stack*)`
/// (receiving arguments from boxed calling convention) and inherits
/// from `c10::OperatorKernel`. Unlike makeFromBoxedFunction, functions
/// registered in this way can also carry additional state which
/// is managed by the functor; this is useful if you're writing an
/// adapter to some other implementation, e.g., a Python callable, which
/// is dynamically associated with the registered kernel.
template <class KernelFunctor>
static CppFunction makeFromBoxedFunctor(
std::unique_ptr<KernelFunctor> kernelFunctor) {
return makeFromBoxedKernel(
c10::BoxedKernel::makeFromFunctor(std::move(kernelFunctor)));
}
/// Create a function from an unboxed kernel function.
/// This is typically used to register common operators.
template <
typename FuncPtr,
std::enable_if_t<
c10::guts::is_function_type<FuncPtr>::value,
std::nullptr_t> = nullptr>
static CppFunction makeFromUnboxedFunction(FuncPtr* f) {
return CppFunction(f);
}
/// Create a function from a compile time unboxed kernel function pointer.
/// This is typically used to register common operators.
/// Compile time function pointers can be used to allow the compiler
  /// to optimize (e.g. inline) calls to them.
template <
typename FuncPtr,
std::enable_if_t<
c10::is_compile_time_function_pointer<FuncPtr>::value,
std::nullptr_t> = nullptr>
static CppFunction makeFromUnboxedFunction(FuncPtr f) {
return CppFunction(f);
}
CppFunction&& debug(std::string d) && {
debug_ = std::move(d);
return std::move(*this);
}
private:
std::optional<c10::DispatchKey> dispatch_key_;
c10::KernelFunction func_;
std::optional<c10::impl::CppSignature> cpp_signature_;
std::unique_ptr<c10::FunctionSchema> schema_;
std::string debug_;
// The "setter" for dispatch_key_
template <typename Func>
friend CppFunction dispatch(c10::DispatchKey, Func&&);
// The only class which actually pulls out values from CppFunction (does so
// destructively, felt too lazy to write accessors that I don't even
// want users to use)
friend class Library;
CppFunction(
c10::KernelFunction func,
std::optional<c10::impl::CppSignature> cpp_signature,
std::unique_ptr<c10::FunctionSchema> schema);
};
/// \defgroup torch-dispatch-overloads torch::dispatch overloads
/// Create a torch::CppFunction which is associated with a specific
/// dispatch key. torch::CppFunctions that are tagged with a
/// c10::DispatchKey don't get invoked unless the dispatcher determines
/// that this particular c10::DispatchKey is the one that should be
/// dispatched to.
///
/// This function is generally not used directly, instead, prefer using
/// TORCH_LIBRARY_IMPL(), which will implicitly set the c10::DispatchKey
/// for all registration calls inside of its body.
///
/// \ingroup torch-dispatch-overloads
template <typename Func>
inline CppFunction dispatch(c10::DispatchKey k, Func&& raw_f) {
CppFunction f(std::forward<Func>(raw_f));
if (k == c10::DispatchKey::CatchAll) {
f.dispatch_key_ = std::nullopt;
} else {
f.dispatch_key_ = k;
}
return f;
}
/// Convenience overload of dispatch() which accepts c10::DeviceType
///
/// \ingroup torch-dispatch-overloads
template <typename Func>
inline CppFunction dispatch(c10::DeviceType type, Func&& raw_f) {
auto deviceTypeToDispatchKey = [](c10::DeviceType t) {
switch (t) {
// This list is synchronized with the k-constants in c10/core/DeviceType.h
case c10::DeviceType::CPU:
return c10::DispatchKey::CPU;
case c10::DeviceType::CUDA:
return c10::DispatchKey::CUDA;
case c10::DeviceType::IPU:
return c10::DispatchKey::IPU;
case c10::DeviceType::XLA:
return c10::DispatchKey::XLA;
case c10::DeviceType::Lazy:
return c10::DispatchKey::Lazy;
case c10::DeviceType::XPU:
return c10::DispatchKey::XPU;
case c10::DeviceType::MPS:
return c10::DispatchKey::MPS;
case c10::DeviceType::Meta:
return c10::DispatchKey::Meta;
case c10::DeviceType::HIP:
return c10::DispatchKey::HIP;
case c10::DeviceType::MAIA:
return c10::DispatchKey::MAIA;
case c10::DeviceType::HPU:
return c10::DispatchKey::HPU;
case c10::DeviceType::MTIA:
return c10::DispatchKey::MTIA;
case c10::DeviceType::PrivateUse1:
return c10::DispatchKey::PrivateUse1;
default:
TORCH_CHECK(
false,
"Device type ",
t,
" cannot be overloaded at dispatch time, "
"please file a bug report explaining what you were trying to do.");
}
};
return dispatch(deviceTypeToDispatchKey(type), std::forward<Func>(raw_f));
}
/// \defgroup torch-schema-overloads torch::schema overloads
/// Construct a c10::FunctionSchema from a string, with an explicitly
/// specified c10::AliasAnalysisKind. Ordinarily, schemas are simply
/// passed in as strings, but if you need to specify a custom alias
/// analysis, you can replace the string with a call to this function.
///
/// ```
/// // Default alias analysis (FROM_SCHEMA)
/// m.def("def3(Tensor self) -> Tensor");
/// // Pure function alias analysis
/// m.def(torch::schema("def3(Tensor self) -> Tensor",
/// c10::AliasAnalysisKind::PURE_FUNCTION));
/// ```
///
/// \ingroup torch-schema-overloads
inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k, bool allow_typevars=false) {
c10::FunctionSchema s = torch::jit::parseSchema(str, /*allow_typevars*/allow_typevars);
s.setAliasAnalysis(k);
return s;
}
/// Function schemas can be directly constructed from string literals.
///
/// \ingroup torch-schema-overloads
inline c10::FunctionSchema schema(const char* s, bool allow_typevars=false) {
return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA, allow_typevars);
}
/// \private
///
/// Already constructed function schemas are accepted if they are
/// rvalues.
///
/// \ingroup torch-schema-overloads
inline c10::FunctionSchema&& schema(c10::FunctionSchema&& s) {
return std::move(s);
}
namespace detail {
inline std::variant<c10::OperatorName, c10::FunctionSchema> constructSchemaOrName(
c10::FunctionSchema&& s) {
return std::move(s);
}
inline std::variant<c10::OperatorName, c10::FunctionSchema> constructSchemaOrName(
c10::OperatorName&& n) {
return std::move(n);
}
inline std::variant<c10::OperatorName, c10::FunctionSchema>
constructSchemaOrName(const char* str) {
auto s = torch::jit::parseSchemaOrName(str);
if (std::holds_alternative<c10::FunctionSchema>(s)) {
std::get<c10::FunctionSchema>(s).setAliasAnalysis(
c10::AliasAnalysisKind::FROM_SCHEMA);
}
return s;
}
class TorchLibraryInit;
} // namespace detail
// Note [Selective build]
// ~~~~~~~~~~~~~~~~~~~~~~
// In some settings, especially mobile, it is important to avoid compiling any
// references to functions that you aren't actually going to use, so that they
// can be eliminated by the linker. We call this capability "selective build".
//
// A very easy way to implement selective build which results in a lot of
// boilerplate is to just add ifdef's around every registration call, but this
// means you have to write a lot of extra lines of code at every registration
// site, and it also means you have to define some munging scheme to map
// operators to macros.
//
// Instead of doing this, we have a different mechanism centered around the
// concept of a SelectiveStr. A selective name is like a const char* string,
// except it also carries at compile time a boolean saying whether or not a
// registration should actually happen or not. We then have extra overloads
// which bypass registration entirely if a selective name is disabled. We do a
// constexpr test to see if an operator should be enabled or not; this is
// currently implemented in ATen/core/op_registration/op_allowlist.h
namespace detail {
// dummy class for non selected custom torchbind classes
class ClassNotSelected {
public:
ClassNotSelected& def_pickle(...) {
return *this;
}
ClassNotSelected& def(...) {
return *this;
}
};
// A SelectiveStr is like a const char*, except that it also comes
// with a type brand that says whether or not the name is enabled or
// not. If the string is disabled, then (at compile time) we DON'T generate
// a registration call for it. This class is not intended to be called
// directly; use TORCH_SELECTIVE_NAME or TORCH_SELECTIVE_SCHEMA macros below
// to create it.
template <bool enabled>
class SelectiveStr {
public:
constexpr explicit SelectiveStr(const char* name) : name_(name) {}
constexpr operator const char*() {
return name_;
}
private:
const char* name_;
};
#define TORCH_SELECTIVE_CLASS(n) \
torch::detail::SelectiveStr<c10::impl::custom_class_allowlist_check(n)>(n)
#define TORCH_SELECTIVE_NAME(n) \
torch::detail::SelectiveStr<c10::impl::op_allowlist_check(n)>(n)
#define TORCH_SELECTIVE_SCHEMA(n) \
torch::detail::SelectiveStr<c10::impl::schema_allowlist_check(n)>(n)
} // namespace detail
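// Illustrative usage sketch (editor's addition, not part of the original
// header): selective registration. When "myops::mul" is absent from the
// operator allowlist, TORCH_SELECTIVE_SCHEMA/TORCH_SELECTIVE_NAME produce a
// SelectiveStr<false>, the no-op overloads below are chosen, and mul_cpu_impl
// (a hypothetical kernel) can be stripped by the linker.
//
//   TORCH_LIBRARY(myops, m) {
//     m.def(TORCH_SELECTIVE_SCHEMA("myops::mul(Tensor a, Tensor b) -> Tensor"));
//   }
//   TORCH_LIBRARY_IMPL(myops, CPU, m) {
//     m.impl(TORCH_SELECTIVE_NAME("myops::mul"), mul_cpu_impl);
//   }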
/// This object provides the API for defining operators and providing
/// implementations at dispatch keys. Typically, a torch::Library
/// is not allocated directly; instead it is created by the
/// TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() macros.
///
/// Most methods on torch::Library return a reference to itself,
/// supporting method chaining.
///
/// ```
/// // Examples:
///
/// TORCH_LIBRARY(torchvision, m) {
/// // m is a torch::Library
/// m.def("roi_align", ...);
/// ...
/// }
///
/// TORCH_LIBRARY_IMPL(aten, XLA, m) {
/// // m is a torch::Library
/// m.impl("add", ...);
/// ...
/// }
/// ```
///
class TORCH_API Library final {
public:
/// \private
///
/// Which type of macro produced this Library
enum Kind {
DEF, // from TORCH_LIBRARY (no qualifier)
IMPL,
FRAGMENT,
};
/// \private
///
/// Use TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() instead of using these
/// constructors directly
Library(
Kind kind,
std::string ns,
std::optional<c10::DispatchKey> k,
const char* file,
uint32_t line);
Library(const Library&) = delete;
Library& operator=(const Library&) = delete;
Library(Library&&) = default;
Library& operator=(Library&&) = default;
~Library() = default;
// Some notes about the API design here. We had the following constraints:
//
// - We need to support multiple "types" of arguments for schema and
// functions (e.g., unnamed lambda types, regular functions, const char*,
// fully instantiated schemas)
// - We don't want to write exponentially many overloads
// - We don't want to rely on implicit conversion to a common type,
// because the C++ compiler will only be willing to do a single
// implicit conversion (reducing the set of valid types which you
// can invoke with); also error messages are worse when an implicit
// conversion is not selected (as the compiler will not explain
// why it didn't select an implicit conversion; this is different
// from overloads where it will explain each candidate overload and
// why it didn't apply)
//
// To solve all of these constraints at the same time, we use a trick taken
// from the pybind11 library: template over the argument in the user visible
// API, and inside of the templated function explicitly call an overloaded
// function to resolve the argument to a real type. You get the good error
// messages from overloads, but at the same time you only need to write the
// overload for any given argument type once.
/// Declare an operator with a schema, but don't provide any implementations
/// for it. You're expected to then provide implementations using the
/// impl() method. All template arguments are inferred.
///
/// \param raw_schema The schema of the operator to be defined.
/// Typically, this is a `const char*` string literal, but any type
/// accepted by torch::schema() is accepted here.
///
/// ```
/// // Example:
/// TORCH_LIBRARY(myops, m) {
/// m.def("add(Tensor self, Tensor other) -> Tensor");
/// }
/// ```
template <typename Schema>
Library& def(
Schema&& raw_schema,
const std::vector<at::Tag>& tags = {},
_RegisterOrVerify rv = _RegisterOrVerify::REGISTER) & {
c10::FunctionSchema s = schema(std::forward<Schema>(raw_schema));
return _def(std::move(s), nullptr, tags, rv);
}
/// Declares that for all operators that are subsequently def'ed, their
/// fake impls may be found in the given Python module (pymodule).
/// This registers some help text that is used if the fake impl
/// cannot be found.
///
/// Args:
/// - pymodule: the python module
/// - context: We may include this in the error message.
Library& set_python_module(const char* pymodule, const char* context = "") {
python_module_ = {pymodule, context};
return *this;
}
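// Illustrative usage sketch (editor's addition, not part of the original
// header): the module path "mylib._custom_ops" and the operator are
// hypothetical. set_python_module() applies to the def() calls that follow it.
//
//   TORCH_LIBRARY(mylib, m) {
//     m.set_python_module("mylib._custom_ops");
//     m.def("mul(Tensor a, Tensor b) -> Tensor");
//   }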
/// Deprecated; use set_python_module instead
Library& impl_abstract_pystub(const char* pymodule, const char* context = "") {
return set_python_module(pymodule, context);
}
/// Define an operator for a schema and then register an implementation for
/// it. This is typically what you would use if you aren't planning
/// on making use of the dispatcher to structure your operator
/// implementation. It's roughly equivalent to calling def() and
/// then impl(), but if you omit the schema of the operator, we will
/// infer it from the type of your C++ function. All template
/// arguments are inferred.
///
/// \param raw_name_or_schema The schema of the operator to be
/// defined, or just the name of the operator if the schema is to be
/// inferred from `raw_f`. Typically a `const char*` literal.
/// \param raw_f The C++ function that implements this operator.
/// Any valid constructor of torch::CppFunction is accepted here;
/// typically you provide a function pointer or lambda.
///
/// ```
/// // Example:
/// TORCH_LIBRARY(myops, m) {
/// m.def("add", add_fn);
/// }
/// ```
template <typename NameOrSchema, typename Func>
Library& def(NameOrSchema&& raw_name_or_schema, Func&& raw_f,
const std::vector<at::Tag>& tags = {}) & {
CppFunction f(std::forward<Func>(raw_f));
return _def(
detail::constructSchemaOrName(
::std::forward<NameOrSchema>(raw_name_or_schema)),
::std::move(f), tags);
}
/// Register an implementation for an operator. You may register multiple
/// implementations for a single operator at different dispatch keys
/// (see torch::dispatch()). Implementations must have a corresponding
/// declaration (from def()), otherwise they are invalid. If you plan
/// to register multiple implementations, DO NOT provide a function
/// implementation when you def() the operator.
///
/// \param name The name of the operator to implement. Do NOT provide
/// schema here.
/// \param raw_f The C++ function that implements this operator. Any
/// valid constructor of torch::CppFunction is accepted here;
/// typically you provide a function pointer or lambda.
///
/// ```
/// // Example:
/// TORCH_LIBRARY_IMPL(myops, CUDA, m) {
/// m.impl("add", add_cuda);
/// }
/// ```
template <typename Name, typename Func>
Library& impl(
Name name,
Func&& raw_f,
_RegisterOrVerify rv = _RegisterOrVerify::REGISTER) & {
// TODO: need to raise an error when you impl a function that has a
// catch all def
#if defined C10_MOBILE
CppFunction f(std::forward<Func>(raw_f), NoInferSchemaTag());
#else
CppFunction f(std::forward<Func>(raw_f));
#endif
return _impl(name, std::move(f), rv);
}
#if defined C10_MOBILE
// Note: This overload is needed only for C10_MOBILE, since the automatically
// defined copy constructor for the CppFunction doesn't have the additional
// NoInferSchemaTag argument. We define the overload for the impl() function
// to accept a CppFunction&& argument. The already constructed CppFunction
// object may or may not have the inferred schema, but it doesn't matter
// for our purposes since if it already has the inferred schema, then we
// might as well just pass it through directly.
//
template <typename Name>
Library& impl(Name name, CppFunction&& raw_f) & {
// TODO: need to raise an error when you impl a function that has a
// catch all def
CppFunction f(std::forward<CppFunction>(raw_f));
return _impl(name, std::move(f));
}
#endif
// Helper for getting an OperatorName for a const char*. You probably
// don't need this.
c10::OperatorName _resolve(const char* name) const;
/// \private
///
/// Convenience overload for directly specifying the dispatch key when
/// impl(). You probably don't need this; instead, prefer specifying
/// the dispatch key for the entire block in TORCH_LIBRARY_IMPL()
template <typename Name, typename Dispatch, typename Func>
Library& impl(Name name, Dispatch&& key, Func&& raw_f) & {
return impl(
name, dispatch(std::forward<Dispatch>(key), std::forward<Func>(raw_f)));
}
template <typename Name, typename Func>
Library& impl_UNBOXED(Name /*name*/, Func* /*raw_f*/) & {
static_assert(
c10::guts::false_t<Func>(),
".impl_UNBOXED(...) was removed. Please use .impl(...) instead.");
return *this;
}
// These overloads cover cases when a SelectiveStr (see Note [Selective
// build]) has been disabled at compile time. In that case, don't generate
// any code referencing the passed in functions at all.
Library& def(detail::SelectiveStr<false>, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
return *this;
}
Library& def(detail::SelectiveStr<true> raw_schema, const std::vector<at::Tag>& tags = {}) & {
return def(raw_schema.operator const char*(), tags);
}
template <typename Func>
Library& def(detail::SelectiveStr<false>, Func&& /*raw_f*/, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
return *this;
}
template <typename Func>
Library& def(detail::SelectiveStr<true> raw_name_or_schema, Func&& raw_f, const std::vector<at::Tag>& tags = {}) & {
return def(
raw_name_or_schema.operator const char*(), std::forward<Func>(raw_f), tags);
}
template <typename Func>
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
Library& impl(detail::SelectiveStr<false>, Func&& /*raw_f*/) & {
return *this;
}
template <typename Dispatch, typename Func>
Library& impl(
detail::SelectiveStr<false>,
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
Dispatch&& /*key*/,
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
Func&& /*raw_f*/) & {
return *this;
}
template <typename Func>
Library& impl_UNBOXED(
detail::SelectiveStr<false> /*name*/,
Func* /*raw_f*/) & {
static_assert(
c10::guts::false_t<Func>(),
".impl_UNBOXED(...) was removed. Please use .impl(...) instead.");
return *this;
}
template <typename Func>
Library& impl(detail::SelectiveStr<true> name, Func&& raw_f) & {
return impl(name.operator const char*(), std::forward<Func>(raw_f));
}
template <typename Dispatch, typename Func>
Library& impl(
detail::SelectiveStr<true> name,
Dispatch&& key,
Func&& raw_f) & {
return impl(
name.operator const char*(),
std::forward<Dispatch>(key),
std::forward<Func>(raw_f));
}
template <typename Func>
Library& impl_UNBOXED(
detail::SelectiveStr<true> /*name*/,
Func* /*raw_f*/) & {
static_assert(
c10::guts::false_t<Func>(),
".impl_UNBOXED(...) was removed. Please use .impl(...) instead.");
return *this;
}
/// Register a fallback implementation for all operators which will be used
/// if there is not a specific implementation for an operator available.
/// There MUST be a DispatchKey associated with a fallback; e.g.,
/// only call this from TORCH_LIBRARY_IMPL() with namespace `_`.
///
/// \param raw_f The function that implements the fallback. Unboxed
/// functions typically do not work as fallback functions, as
/// fallback functions must work for every operator (even though
/// they have varying type signatures). Typical arguments are
/// CppFunction::makeFallthrough() or
/// CppFunction::makeFromBoxedFunction()
///
/// ```
/// // Example:
///
/// TORCH_LIBRARY_IMPL(_, AutogradXLA, m) {
/// // If there is not a kernel explicitly registered
/// // for AutogradXLA, fallthrough to the next
/// // available kernel
/// m.fallback(torch::CppFunction::makeFallthrough());
/// }
///
/// // See aten/src/ATen/core/dispatch/backend_fallback_test.cpp
/// // for a full example of boxed fallback
/// ```
template <typename Func>
Library& fallback(Func&& raw_f) & {
CppFunction f((std::forward<Func>(raw_f)));
return _fallback(std::move(f));
}
template <class CurClass>
inline torch::class_<CurClass> class_(const std::string& className);
// These overloads enable the use of selective build on classes registered
// within a library. The API is the same as before, with one minor change:
// instead of m.class_<foo>("foo") you write
// m.class_<foo>(TORCH_SELECTIVE_CLASS("foo")).
template <class CurClass>
inline torch::class_<CurClass> class_(detail::SelectiveStr<true> className);
template <class CurClass>
inline detail::ClassNotSelected class_(detail::SelectiveStr<false> className);
// De-registers all registrations created with this Library
void reset();
private:
Kind kind_;
std::optional<std::string> ns_;
std::optional<c10::DispatchKey> dispatch_key_;
std::optional<std::pair<const char*, const char*>> python_module_;
const char* file_;
uint32_t line_;
std::vector<c10::RegistrationHandleRAII> registrars_;
friend class detail::TorchLibraryInit;
// Non-user visible actual implementations of functions. These aren't
// public because we only implement & qualifier and not && qualifier
Library& _def(
c10::FunctionSchema&& schema,
c10::OperatorName* out_name = nullptr,
const std::vector<at::Tag>& tags = {},
_RegisterOrVerify rv = _RegisterOrVerify::REGISTER) &;
Library& _def(
std::variant<c10::OperatorName, c10::FunctionSchema>&&,
CppFunction&& f,
const std::vector<at::Tag>& tags = {}) &;
Library& _impl(
const char* name,
CppFunction&& f,
_RegisterOrVerify rv = _RegisterOrVerify::REGISTER) &;
Library& _fallback(CppFunction&& f) &;
at::OperatorName _parseNameForLib(const char* name_str) const;
};
namespace detail {
class TorchLibraryInit final {
private:
using InitFn = void(Library&);
Library lib_;
public:
TorchLibraryInit(
Library::Kind kind,
InitFn* fn,
const char* ns,
std::optional<c10::DispatchKey> k,
const char* file,
uint32_t line)
: lib_(kind, ns, k, file, line) {
fn(lib_);
}
};
} // namespace detail
} // namespace torch
// NB: The EXACT NAMING of the initializer functions (e.g.,
// TORCH_LIBRARY_init_aten) matters for the code analyzer;
// see the regexes at tools/code_analyzer/run_analyzer.sh
/// Macro for defining a function that will be run at static
/// initialization time to define a library of operators in the
/// namespace `ns` (must be a valid C++ identifier, no quotes).
/// Use this macro when you want to define a new set of custom operators
/// that do not already exist in PyTorch.
///
/// Example usage:
///
/// ```
/// TORCH_LIBRARY(myops, m) {
/// // m is a torch::Library; methods on it will define
/// // operators in the myops namespace
/// m.def("add", add_impl);
/// }
/// ```
///
/// The `m` argument is bound to a torch::Library that is used to
/// register operators. There may only be one TORCH_LIBRARY()
/// for any given namespace.
#define TORCH_LIBRARY(ns, m) \
static void TORCH_LIBRARY_init_##ns(torch::Library&); \
static const torch::detail::TorchLibraryInit TORCH_LIBRARY_static_init_##ns( \
torch::Library::DEF, \
&TORCH_LIBRARY_init_##ns, \
#ns, \
std::nullopt, \
__FILE__, \
__LINE__); \
void TORCH_LIBRARY_init_##ns(torch::Library& m)
/// \private
///
/// This macro is a version of TORCH_LIBRARY() that doesn't enforce that there
/// is only one library (it is a "fragment"). This is used inside the
/// PerOpRegistration.cpp file, as well as in places where all op registrations
/// within the same namespace cannot be easily put into one macro block
/// (this is mostly the case for custom ops in fbcode that were ported from
/// the old API)
#define TORCH_LIBRARY_FRAGMENT(ns, m) _TORCH_LIBRARY_FRAGMENT(ns, m, C10_UID)
/// \private
///
/// The above macro requires an extra unique identifier (uid) to prevent
/// variable name collisions. This can happen if TORCH_LIBRARY_FRAGMENT is called
/// multiple times with the same namespace in the same translation unit. Note
/// that the TORCH_LIBRARY variant doesn't run into this problem, because it
/// enforces that it can only be called once for a given namespace.
#define _TORCH_LIBRARY_FRAGMENT(ns, m, uid) \
static void C10_CONCATENATE( \
TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library&); \
static const torch::detail::TorchLibraryInit C10_CONCATENATE( \
TORCH_LIBRARY_FRAGMENT_static_init_##ns##_, uid)( \
torch::Library::FRAGMENT, \
&C10_CONCATENATE(TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid), \
#ns, \
std::nullopt, \
__FILE__, \
__LINE__); \
void C10_CONCATENATE( \
TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library & m)
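// Illustrative usage sketch (editor's addition, not part of the original
// header): registrations for one (hypothetical) namespace split across
// translation units using fragments.
//
//   // file_a.cpp
//   TORCH_LIBRARY_FRAGMENT(myops, m) {
//     m.def("mul(Tensor a, Tensor b) -> Tensor");
//   }
//
//   // file_b.cpp
//   TORCH_LIBRARY_FRAGMENT(myops, m) {
//     m.def("add(Tensor a, Tensor b) -> Tensor");
//   }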
/// Macro for defining a function that will be run at static
/// initialization time to define operator overrides for dispatch key
/// `k` (must be an unqualified enum member of c10::DispatchKey) in
/// namespace `ns` (must be a valid C++ identifier, no quotes). Use this
/// macro when you want to implement a preexisting set of custom
/// operators on a new dispatch key (e.g., you want to provide CUDA
/// implementations of already existing operators). One common usage
/// pattern is to use TORCH_LIBRARY() to define schema for all new
/// operators you want to define, and then use several
/// TORCH_LIBRARY_IMPL() blocks to provide implementations of the
/// operator for CPU, CUDA and Autograd.
///
/// In some cases, you need to define something that applies to all namespaces,
/// not just one namespace (usually a fallback). In that case, use the reserved
/// namespace _, e.g.,
///
/// ```
/// TORCH_LIBRARY_IMPL(_, XLA, m) {
/// m.fallback(xla_fallback);
/// }
/// ```
///
/// Example usage:
///
/// ```
/// TORCH_LIBRARY_IMPL(myops, CPU, m) {
/// // m is a torch::Library; methods on it will define
/// // CPU implementations of operators in the myops namespace.
/// // It is NOT valid to call torch::Library::def()
/// // in this context.
/// m.impl("add", add_cpu_impl);
/// }
/// ```
///
/// If ``add_cpu_impl`` is an overloaded function, use a
/// ``static_cast`` to specify which overload you want
/// (by providing the full type).
///
// NB: if the dispatch key is not allowlisted, we simply omit the Library
// call entirely
#define TORCH_LIBRARY_IMPL(ns, k, m) _TORCH_LIBRARY_IMPL(ns, k, m, C10_UID)
/// \private
///
/// The above macro requires an extra unique identifier (uid) to prevent
/// variable name collisions. This can happen if TORCH_LIBRARY_IMPL is called
/// multiple times with the same namespace and dispatch key in the same
/// translation unit.
#define _TORCH_LIBRARY_IMPL(ns, k, m, uid) \
static void C10_CONCATENATE( \
TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library&); \
static const torch::detail::TorchLibraryInit C10_CONCATENATE( \
TORCH_LIBRARY_IMPL_static_init_##ns##_##k##_, uid)( \
torch::Library::IMPL, \
&C10_CONCATENATE(TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid), \
#ns, \
std::make_optional(c10::DispatchKey::k), \
__FILE__, \
__LINE__); \
void C10_CONCATENATE( \
TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library & m)
// These are variants of the macros above which are to be used for testing (they
// don't set up the static initializer, so you can control the visibility of
// the allocated library yourself).
//
// DO NOT use these in production code, they are NOT understood by the
// code analyzer and will be incorrectly analyzed in those situations.
/// \private
#define MAKE_TORCH_LIBRARY(ns) \
torch::Library(torch::Library::DEF, #ns, std::nullopt, __FILE__, __LINE__)
/// \private
#define MAKE_TORCH_LIBRARY_IMPL(ns, k) \
torch::Library( \
torch::Library::IMPL, \
#ns, \
std::make_optional(c10::DispatchKey::k), \
__FILE__, \
__LINE__)
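// Illustrative usage sketch (editor's addition, not part of the original
// header): a test controlling library lifetime explicitly. The registrations
// are RAII-managed, so they are torn down when the torch::Library objects go
// out of scope. mul_cpu_impl is a hypothetical kernel.
//
//   {
//     torch::Library lib = MAKE_TORCH_LIBRARY(myops);
//     lib.def("mul(Tensor a, Tensor b) -> Tensor");
//     torch::Library cpu_lib = MAKE_TORCH_LIBRARY_IMPL(myops, CPU);
//     cpu_lib.impl("mul", mul_cpu_impl);
//     // ... exercise the operator ...
//   }  // registrations removed here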
// Make the custom class API visible, so it is available from
// torch::Library.
#include <torch/custom_class.h>
```
|
====================================================================================================================
SOURCE CODE FILE: script.h
LINES: 1
SIZE: 0.47 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\script.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/api/include/torch/types.h>
#include <torch/csrc/autograd/InferenceMode.h>
#include <torch/csrc/autograd/custom_function.h>
#include <torch/csrc/autograd/generated/variable_factories.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/jit/runtime/custom_operator.h>
#include <torch/csrc/jit/serialization/import.h>
#include <torch/csrc/jit/serialization/pickle.h>
#include <torch/custom_class.h>
#include <ATen/ATen.h>
```
|
===============================================================================================================
SOURCE CODE FILE: xnnpack.h
LINES: 1
SIZE: 201.03 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\xnnpack.h
ENCODING: utf-8
```h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include "pthreadpool.h"
#ifdef __cplusplus
extern "C" {
#endif
/// The number of bytes XNNPACK may read beyond array bounds.
/// The caller must allocate at least this many extra bytes after the tensor data passed to XNNPACK.
///
/// Note: XNNPACK reads, but never writes beyond array bounds.
#if XNN_ARCH_HEXAGON
#define XNN_EXTRA_BYTES 128
#else
#define XNN_EXTRA_BYTES 16
#endif // XNN_ARCH_HEXAGON
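// Illustrative usage sketch (editor's addition, not part of the original
// header): honoring the over-read allowance when allocating tensor data that
// will be handed to XNNPACK. The shape is hypothetical.
//
//   size_t num_elements = 1 * 224 * 224 * 3;
//   float* data = (float*) malloc(num_elements * sizeof(float) + XNN_EXTRA_BYTES);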
/// Maximum number of dimensions in tensor shape.
#define XNN_MAX_TENSOR_DIMS 6
/// A value ID that cannot be valid.
#define XNN_INVALID_VALUE_ID UINT32_MAX
/// Allow sparse inference in a Runtime.
///
/// Note: this flag is a hint to XNNPACK that it should consider sparse inference, but does not guarantee it.
#define XNN_FLAG_HINT_SPARSE_INFERENCE 0x00000001
/// Allow IEEE FP16 inference in a Runtime.
///
/// Note: this flag hints XNNPACK to consider IEEE FP16 inference, but does not guarantee it.
#define XNN_FLAG_HINT_FP16_INFERENCE 0x00000002
/// Force IEEE FP16 inference in a Runtime, and fail if FP16 inference is not possible.
///
/// Note: this flag guarantees that XNNPACK will use IEEE FP16 inference, or fail to create the Runtime object.
/// Warning: on x86 systems FP16 computations will be emulated at a substantial performance cost.
#define XNN_FLAG_FORCE_FP16_INFERENCE 0x00000004
/// Enable timing of each operator's runtime.
#define XNN_FLAG_BASIC_PROFILING 0x00000008
/// Enable the just-in-time compiler.
#define XNN_FLAG_JIT 0x00000010
/// The convolution operator represents a depthwise convolution, and uses HWGo layout for filters.
#define XNN_FLAG_DEPTHWISE_CONVOLUTION 0x00000001
/// Assume transposed weights in a fully connected operator.
#define XNN_FLAG_TRANSPOSE_WEIGHTS 0x00000001
/// The operator assumes NHWC layout for the input, regardless of the output layout.
#define XNN_FLAG_INPUT_NHWC 0x00000002
/// Match "SAME" padding in TensorFlow. Exact padding values are computed dynamically depending on input size.
#define XNN_FLAG_TENSORFLOW_SAME_PADDING 0x00000004
/// Assume transposed weights in a batch matrix multiply operator.
#define XNN_FLAG_TRANSPOSE_B XNN_FLAG_TRANSPOSE_WEIGHTS
/// Assume transposed input in a batch matrix multiply operator.
#define XNN_FLAG_TRANSPOSE_A 0x00000002
/// Implicitly flatten and reshape input of a Fully Connected operator into a 2D tensor.
#define XNN_FLAG_TENSORFLOW_RESHAPE_2D 0x00000004
/// Match behaviour of TensorFlow 1.x.
#define XNN_FLAG_TENSORFLOW_LEGACY_MODE 0x00000004
/// Static weights of the FP16 operator are in FP32 format.
#define XNN_FLAG_FP32_STATIC_WEIGHTS 0x00000008
/// Static biases of the FP16 operator are in FP32 format.
#define XNN_FLAG_FP32_STATIC_BIASES 0x00000080
/// Align corners of input and output images in resize operations.
#define XNN_FLAG_ALIGN_CORNERS 0x00000008
/// Yield worker threads of the thread pool to the system scheduler after the inference.
#define XNN_FLAG_YIELD_WORKERS 0x00000010
/// Use transient indirection buffer to reduce memory footprint
#define XNN_FLAG_TRANSIENT_INDIRECTION_BUFFER 0x00000020
/// Retain reduced dimensions with length 1.
#define XNN_FLAG_KEEP_DIMS 0x00000040
// Next unused flag value: 0x00000100.
/// The number of entries in an array of xnn_quantization_params that XNNPACK may read beyond array bounds.
/// The caller must allocate at least this many extra xnn_quantization_params before passing the array to XNNPACK.
///
/// Note: XNNPACK reads, but never writes beyond array bounds.
#define XNN_EXTRA_QUANTIZATION_PARAMS 15
/// The minimum blocksize for blockwise quantized operators.
#define XNN_MIN_BLOCKSIZE 32
#ifdef __GNUC__
#define XNN_DEPRECATED __attribute__((deprecated))
#else
#define XNN_DEPRECATED
#endif
struct xnn_quantization_params {
int32_t zero_point;
float scale;
};
/// Status code for any XNNPACK function call.
enum xnn_status {
/// The call succeeded, and all output arguments now contain valid data.
xnn_status_success = 0,
xnn_status_uninitialized = 1,
xnn_status_invalid_parameter = 2,
xnn_status_invalid_state = 3,
xnn_status_unsupported_parameter = 4,
xnn_status_unsupported_hardware = 5,
xnn_status_out_of_memory = 6,
xnn_status_reallocation_required = 7,
xnn_status_deprecated = 8,
};
struct xnn_allocator {
/// User-specified pointer that will be passed as-is to all functions in this structure.
void* context;
/// Pointer to a function to be called for general memory allocation.
///
/// @param context - The user-specified pointer from xnn_allocator structure.
/// @param size - The size of the memory block to allocate, in bytes.
///
/// @returns Pointer to the allocated memory block of at least @ref size bytes.
/// If allocation fails, the function must return NULL.
void* (*allocate)(void* context, size_t size);
/// Pointer to a function to be called for general memory re-allocation, i.e. to increase or shrink a previously
/// allocated memory block. The content of the old memory block is copied to the new memory block.
///
/// @param context - The user-specified pointer from xnn_allocator structure.
/// @param pointer - Pointer to a memory block allocated by @ref allocate or @ref reallocate functions. Can be NULL.
/// If the pointer is NULL, the @ref reallocate call is equivalent to an @ref allocate call.
/// @param size - The new size of the memory block to allocate, in bytes.
///
/// @returns Pointer to the newly allocated memory block of at least @ref size bytes with the content of the previous
/// memory block.
/// If allocation fails, the function must return NULL, but must not release the previous memory block.
void* (*reallocate)(void* context, void* pointer, size_t size);
/// Pointer to a function to be called for general memory de-allocation.
///
/// @param context - The user-specified pointer from xnn_allocator structure.
/// @param pointer - Pointer to a memory block allocated by @ref allocate or @ref reallocate functions. Can be NULL.
/// If the pointer is NULL, the @ref deallocate call is a no-op.
void (*deallocate)(void* context, void* pointer);
/// Pointer to a function to be called for aligned memory allocation.
///
/// @param context - The user-specified pointer from xnn_allocator structure.
/// @param alignment - The alignment of the memory block to allocate, in bytes. Alignment is always a power-of-2.
/// @param size - The size of the memory block to allocate, in bytes.
///
/// @returns Pointer to the allocated memory block of at least @ref size bytes.
/// If allocation fails, the function must return NULL.
void* (*aligned_allocate)(void* context, size_t alignment, size_t size);
/// Pointer to a function to be called for aligned memory deallocation.
///
/// @param context - The user-specified pointer from xnn_allocator structure.
/// @param pointer - Pointer to a memory block allocated by @ref aligned_allocate function. Can be NULL.
/// If the pointer is NULL, the @ref aligned_deallocate call is a no-op.
void (*aligned_deallocate)(void* context, void* pointer);
};
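// Illustrative sketch (editor's addition, not part of the original header): a
// custom allocator backed by the C standard library, assuming <stdlib.h> and a
// C11 aligned_alloc. Since alignment is a power of two, the size is rounded up
// to a multiple of the alignment as aligned_alloc requires.
//
//   static void* my_allocate(void* context, size_t size) { return malloc(size); }
//   static void* my_reallocate(void* context, void* pointer, size_t size) {
//     return realloc(pointer, size);
//   }
//   static void my_deallocate(void* context, void* pointer) { free(pointer); }
//   static void* my_aligned_allocate(void* context, size_t alignment, size_t size) {
//     return aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
//   }
//   static void my_aligned_deallocate(void* context, void* pointer) { free(pointer); }
//
//   struct xnn_allocator my_allocator = {
//       /*context=*/NULL, my_allocate, my_reallocate, my_deallocate,
//       my_aligned_allocate, my_aligned_deallocate};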
/// Initialize XNNPACK library.
///
/// XNNPACK must be successfully initialized before use. During initialization, XNNPACK populates internal structures
/// depending on the host processor. Initialization can be time-consuming.
///
/// @param[in] allocator - structure with function pointers to be use for memory allocation and de-allocation.
/// If this argument is NULL, system-provided memory management functions (e.g. malloc/free)
/// will be used.
///
/// @retval xnn_status_success - XNNPACK is successfully initialized and ready to use.
/// @retval xnn_status_out_of_memory - initialization failed due to out-of-memory condition.
/// @retval xnn_status_unsupported_hardware - initialization failed because the host processor does not satisfy the
/// minimum hardware requirements for XNNPACK. E.g. this may happen on x86
/// processors without SSE2 extension, or on 32-bit ARM processors without
/// the NEON SIMD extension.
enum xnn_status xnn_initialize(const struct xnn_allocator* allocator);
/// Deinitialize XNNPACK library.
///
/// To avoid memory and resource leaks, users must call xnn_deinitialize once for each successful xnn_initialize call.
///
/// @retval xnn_status_success - deinitialization call succeeded.
enum xnn_status xnn_deinitialize(void);
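// Illustrative usage sketch (editor's addition, not part of the original
// header): the typical initialize/use/deinitialize lifecycle with the default
// system allocator.
//
//   if (xnn_initialize(/*allocator=*/NULL) != xnn_status_success) {
//     // handle failure (e.g. unsupported hardware)
//   }
//   // ... create subgraphs / runtimes and run inference ...
//   xnn_deinitialize();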
/// Get the microkernel implementation build identifier's data.
///
/// That identifier will be unique for the current set of microkernel implementations.
///
/// @returns A pointer to the current identifier's data.
const void* xnn_experimental_get_build_identifier_data();
/// Get the microkernel implementation build identifier's data size.
///
/// @returns The size in bytes of the identifier's data.
size_t xnn_experimental_get_build_identifier_size();
/// Check whether the given data matches this version's identifier.
///
/// @returns Whether the given data matches the current build identifier.
bool xnn_experimental_check_build_identifier(const void* data, size_t size);
/// Subgraph is an abstract representation of a neural network model.
/// Subgraph objects are used to define Values (tensors) and Nodes (operators) comprising the model.
typedef struct xnn_subgraph* xnn_subgraph_t;
/// Create an empty Subgraph object.
///
/// @param external_value_ids - number of Value IDs to reserve for communication with external graph representation.
///                             The Subgraph object will avoid creating internal Value IDs in the
///                             [0, external_value_ids-1] range.
/// @param flags - binary features of the subgraph. No supported flags are currently defined.
/// @param subgraph_out - pointer to the variable that will be initialized with a handle to the Subgraph object upon
/// successful return.
enum xnn_status xnn_create_subgraph(
uint32_t external_value_ids,
uint32_t flags,
xnn_subgraph_t* subgraph_out);
/// Destroy a Subgraph object, as well as Values, and Nodes associated with the subgraph.
///
/// @param subgraph - the Subgraph object to destroy.
enum xnn_status xnn_delete_subgraph(
xnn_subgraph_t subgraph);
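// Illustrative usage sketch (editor's addition, not part of the original
// header): creating a Subgraph that reserves two external Value IDs (one
// input, one output) and destroying it when done.
//
//   xnn_subgraph_t subgraph = NULL;
//   enum xnn_status status =
//       xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph);
//   if (status == xnn_status_success) {
//     // ... define Values and Nodes, build a Runtime ...
//     xnn_delete_subgraph(subgraph);
//   }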
#define XNN_VALUE_FLAG_EXTERNAL_INPUT 0x00000001
#define XNN_VALUE_FLAG_EXTERNAL_OUTPUT 0x00000002
#define XNN_VALUE_FLAG_PERSISTENT 0x00000004
#define XNN_INVALID_VALUE_ID UINT32_MAX
/// Type of elements in a Value object.
enum xnn_datatype {
/// Invalid data type. Valid Values never have this datatype.
xnn_datatype_invalid = 0,
/// IEEE754 single-precision floating-point.
xnn_datatype_fp32 = 1,
/// IEEE754 half-precision floating-point.
xnn_datatype_fp16 = 2,
/// Quantized 8-bit signed integer with shared per-Value quantization
/// parameters.
xnn_datatype_qint8 = 3,
/// Quantized 8-bit unsigned integer with shared per-Value quantization
/// parameters.
xnn_datatype_quint8 = 4,
/// Quantized 32-bit signed integer with shared per-Value quantization
/// parameters.
xnn_datatype_qint32 = 5,
/// Quantized 8-bit signed integer with shared per-channel quantization
/// parameters.
xnn_datatype_qcint8 = 6,
/// Quantized 32-bit signed integer with shared per-channel quantization
/// parameters.
xnn_datatype_qcint32 = 7,
/// Quantized 4-bit signed integer with shared per-channel quantization
/// parameters.
xnn_datatype_qcint4 = 8,
/// Dynamically quantized 8-bit signed integer with per-batch quantization
/// parameters.
xnn_datatype_qdint8 = 9,
/// Dynamically quantized 8-bit signed integers packed with their per-row
/// quantization parameters.
xnn_datatype_qpint8 = 10,
/// 32-bit signed integers.
xnn_datatype_int32 = 11,
/// Quantized 4-bit signed integer with shared per-channel-block quantization
/// parameters.
xnn_datatype_qbint4 = 12,
/// IEEE754 single-precision packed floating-point.
xnn_datatype_pfp32 = 13,
/// BFloat16, i.e. the upper 16 bits of a float32.
xnn_datatype_bf16 = 14,
/// Dynamically quantized 8-bit unsigned integer with per-batch quantization
/// parameters.
xnn_datatype_qduint8 = 15,
};
/// Define a tensor-type Value and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Value.
/// @param datatype - type of the tensor elements.
/// @param num_dims - number of dimensions in the shape.
/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
/// XNNPACK does not keep any pointers to this array after the function returns.
/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized,
///               this pointer must be NULL. If non-NULL, the life-time of the static data must exceed the life-time
///               of the Subgraph object, and of any Runtime objects created from the Subgraph.
/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on
/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be
/// created for the Value.
/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT
/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT.
/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a
/// valid @a external_id was provided, the variable will be initialized with the @a external_id value.
enum xnn_status xnn_define_tensor_value(
xnn_subgraph_t subgraph,
enum xnn_datatype datatype,
size_t num_dims,
const size_t* dims,
const void* data,
uint32_t external_id,
uint32_t flags,
uint32_t* id_out);
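// Illustrative usage sketch (editor's addition, not part of the original
// header): defining an external FP32 input tensor whose data is supplied at
// runtime (data is NULL). The shape and external ID 0 are hypothetical.
//
//   const size_t dims[4] = {1, 224, 224, 3};
//   uint32_t input_id = XNN_INVALID_VALUE_ID;
//   enum xnn_status status = xnn_define_tensor_value(
//       subgraph, xnn_datatype_fp32, /*num_dims=*/4, dims, /*data=*/NULL,
//       /*external_id=*/0, XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id);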
/// Define a quantized tensor-type Value and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Value.
/// @param datatype - type of the tensor elements.
/// @param zero_point - offset from zero to subtract from the quantized elements in the Value.
/// @param scale - multiplication factor to convert quantized elements to real representation.
/// @param num_dims - number of dimensions in the shape.
/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
/// XNNPACK does not keep any pointers to this array after the function returns.
/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized,
///               this pointer must be NULL. If non-NULL, the life-time of the static data must exceed the life-time
///               of the Subgraph object, and of any Runtime objects created from the Subgraph.
/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on
/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be
/// created for the Value.
/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT
/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT.
/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a
/// valid @a external_id was provided, the variable will be initialized with the @a external_id value.
enum xnn_status xnn_define_quantized_tensor_value(
xnn_subgraph_t subgraph,
enum xnn_datatype datatype,
int32_t zero_point,
float scale,
size_t num_dims,
const size_t* dims,
const void* data,
uint32_t external_id,
uint32_t flags,
uint32_t* id_out);
enum xnn_status xnn_define_channelwise_quantized_tensor_value(
xnn_subgraph_t subgraph,
enum xnn_datatype datatype,
const float* scale,
size_t num_dims,
size_t channel_dim,
const size_t* dims,
const void* data,
uint32_t external_id,
uint32_t flags,
uint32_t* id_out);
/// Validate the dimensions, zero point, datatype, and scale of a quantized tensor-type.
///
/// @param datatype - type of the tensor elements.
/// @param zero_point - offset from zero to subtract from the quantized elements in the Value.
/// @param scale - multiplication factor to convert quantized elements to real representation.
/// @param num_dims - number of dimensions in the shape.
/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
/// XNNPACK does not keep any pointers to this array after the function returns.
enum xnn_status xnn_validate_quantized_tensor(
enum xnn_datatype datatype,
int32_t zero_point,
float scale,
size_t num_dims,
const size_t* dims);
/// Validate the dimensions, channel_dim, zero point, datatype, and scales of a channelwise quantized tensor-type.
///
/// @param datatype - type of the tensor elements.
/// @param zero_point - offset from zero to subtract from the quantized elements in the Value.
/// @param scale - per-channel multiplication factors to convert quantized elements to real representation.
/// @param num_dims - number of dimensions in the shape.
/// @param channel_dim - index of the channel dimension in the tensor with per-channel quantization parameters.
/// Typically this is the first dimension (dimension #0) of the filter tensors in the Convolution,
/// Deconvolution, and Fully Connected operators and the last dimension of the filter tensors in
/// the Depthwise Convolution operators.
/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
/// XNNPACK does not keep any pointers to this array after the function returns.
enum xnn_status xnn_validate_channelwise_quantized_tensor(
enum xnn_datatype datatype,
int32_t zero_point,
const float* scale,
size_t num_dims,
size_t channel_dim,
const size_t* dims);
/// Define a channelwise quantized tensor-type Value and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Value.
/// @param datatype - type of the tensor elements.
/// @param zero_point - offset from zero to subtract from the quantized elements in the Value.
/// @param scale - per-channel multiplication factors to convert quantized elements to real representation.
/// @param num_dims - number of dimensions in the shape.
/// @param channel_dim - index of the channel dimension in the tensor with per-channel quantization parameters.
/// Typically this is the first dimension (dimension #0) of the filter tensors in the Convolution,
/// Deconvolution, and Fully Connected operators and the last dimension of the filter tensors in
/// the Depthwise Convolution operators.
/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
/// XNNPACK does not keep any pointers to this array after the function returns.
/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized,
///               this pointer must be NULL. If non-NULL, the life-time of the static data must exceed the life-time
///               of the Subgraph object, and of any Runtime objects created from the Subgraph.
/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on
/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be
/// created for the Value.
/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT
/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT.
/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a
/// valid @a external_id was provided, the variable will be initialized with the @a external_id value.
enum xnn_status xnn_define_channelwise_quantized_tensor_value_v2(
xnn_subgraph_t subgraph,
enum xnn_datatype datatype,
int32_t zero_point,
const float* scale,
size_t num_dims,
size_t channel_dim,
const size_t* dims,
const void* data,
uint32_t external_id,
uint32_t flags,
uint32_t* id_out);
/// Define a blockwise quantized tensor-type Value and add it to a Subgraph.
/// @param block_size - size of a block in the tensor with blockwise quantization parameters. A block is defined as
///                     the number of input channel elements per output channel.
///                     For Fully Connected operators with 2D filters of size [output_channels, input_channels],
///                     the expected number of scale values is output_channels * (input_channels / block_size).
enum xnn_status xnn_define_blockwise_quantized_tensor_value(
xnn_subgraph_t subgraph,
enum xnn_datatype datatype,
int32_t zero_point,
const uint16_t* scale,
size_t num_dims,
size_t channel_dim,
size_t block_size,
const size_t* dims,
const void* data,
uint32_t external_id,
uint32_t flags,
uint32_t* id_out);
/// Define a dynamically quantized tensor-type Value and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Value.
/// @param datatype - type of the tensor elements.
/// @param num_dims - number of dimensions in the shape.
/// @param num_nonbatch_dims - number of non-batch dimensions in the shape. The leading (num_dims - num_nonbatch_dims)
/// dimensions will be flattened and treated as batch size. A set of quantization parameters
/// will be calculated for each batch element.
/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
/// XNNPACK does not keep any pointers to this array after the function returns.
/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on
/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be
/// created for the Value.
/// @param flags - binary features of the Value. No supported flags are currently defined.
/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a
/// valid @a external_id was provided, the variable will be initialized with the @a external_id value.
enum xnn_status xnn_define_dynamically_quantized_tensor_value(
xnn_subgraph_t subgraph,
enum xnn_datatype datatype,
size_t num_dims,
size_t num_nonbatch_dims,
const size_t* dims,
uint32_t external_id,
uint32_t flags,
uint32_t* id_out);
/// Type of unary operation
enum xnn_unary_operator {
xnn_unary_invalid = -1,
xnn_unary_convert,
xnn_unary_clamp,
xnn_unary_abs,
xnn_unary_bankers_rounding,
xnn_unary_ceiling,
xnn_unary_elu,
xnn_unary_exp,
xnn_unary_floor,
xnn_unary_gelu,
xnn_unary_hardswish,
xnn_unary_leaky_relu,
xnn_unary_log,
xnn_unary_negate,
xnn_unary_sigmoid,
xnn_unary_square,
xnn_unary_square_root,
xnn_unary_reciprocal_square_root,
xnn_unary_tanh,
// The following operators are experimental and may be removed.
xnn_unary_cube_root,
xnn_unary_cosine,
xnn_unary_sine,
xnn_unary_count_leading_zeros,
xnn_unary_bitwise_not,
xnn_unary_popcount,
xnn_unary_sign,
};
/// Parameters for xnn_define_unary
union xnn_unary_params {
struct {
/// lower bound for clipping output values.
float min;
/// upper bound for clipping output values.
float max;
} clamp;
struct {
/// scale factor for negative input elements.
float alpha;
} elu;
struct {
/// scale factor for negative input elements.
float negative_slope;
} leaky_relu;
};
/// Define a unary operator Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param type - type of unary operator to define.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param params - parameters to be interpreted by the specific operator type.
/// @param flags - binary features of the Node. No supported flags are currently defined.
enum xnn_status xnn_define_unary(
xnn_subgraph_t subgraph,
enum xnn_unary_operator type,
const union xnn_unary_params* params,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
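// Illustrative usage sketch (editor's addition, not part of the original
// header): adding a Leaky ReLU Node between two Values assumed to be defined
// earlier in the subgraph.
//
//   union xnn_unary_params params;
//   params.leaky_relu.negative_slope = 0.01f;
//   enum xnn_status status = xnn_define_unary(
//       subgraph, xnn_unary_leaky_relu, &params, input_id, output_id, /*flags=*/0);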
/// Define a Convert Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Convert Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_convert(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2D Convolution Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING
/// flag is specified.
/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param kernel_height - kernel (filter) height.
/// @param kernel_width - kernel (filter) width.
/// @param subsampling_height - height of subsampling region for convolution output (convolution height stride).
/// @param subsampling_width - width of subsampling region for convolution output (convolution width stride).
/// @param dilation_height - dilation of kernel elements along the height dimension.
/// @param dilation_width - dilation of kernel elements along the width dimension.
/// @param groups - number of convolution groups.
/// @param group_input_channels - number of input channels per group.
/// @param group_output_channels - number of output channels per group.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
/// with [N, IH, IW, groups * group_input_channels] dimensions
/// @param filter_id - Value ID for the filter tensor. The filter tensor must be a 4D tensor defined in the @a subgraph
/// with [groups * group_output_channels, kernel_height, kernel_width, group_input_channels]
/// dimensions.
/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Convolution Node without a bias. If
/// present, the bias tensor must be a 1D tensor defined in the @a subgraph with [groups *
/// group_output_channels] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, OH, OW, groups * group_output_channels] dimensions.
/// @param flags - binary features of the 2D Convolution Node. The only currently supported value is
/// XNN_FLAG_TENSORFLOW_SAME_PADDING.
enum xnn_status xnn_define_convolution_2d(
xnn_subgraph_t subgraph,
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
float output_min,
float output_max,
uint32_t input_id,
uint32_t filter_id,
uint32_t bias_id,
uint32_t output_id,
uint32_t flags);
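// Illustrative usage sketch (editor's addition, not part of the original
// header): a 3x3, stride-1 convolution with explicit padding of 1, 3 input and
// 16 output channels, and no output clipping (-INFINITY/+INFINITY from
// <math.h>). The input/filter/bias/output Value IDs are assumed to be defined
// earlier.
//
//   enum xnn_status status = xnn_define_convolution_2d(
//       subgraph,
//       /*input_padding_top=*/1, /*input_padding_right=*/1,
//       /*input_padding_bottom=*/1, /*input_padding_left=*/1,
//       /*kernel_height=*/3, /*kernel_width=*/3,
//       /*subsampling_height=*/1, /*subsampling_width=*/1,
//       /*dilation_height=*/1, /*dilation_width=*/1,
//       /*groups=*/1, /*group_input_channels=*/3, /*group_output_channels=*/16,
//       /*output_min=*/-INFINITY, /*output_max=*/INFINITY,
//       input_id, filter_id, bias_id, output_id, /*flags=*/0);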
/// Define a 2D Deconvolution (Transposed Convolution) Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param padding_top - implicit padding above 2D output data.
/// @param padding_right - implicit padding to the right of 2D output data.
/// @param padding_bottom - implicit padding below 2D output data.
/// @param padding_left - implicit padding to the left of 2D output data.
/// @param adjustment_height - additional elements in the bottom of the 2D output data.
/// @param adjustment_width - additional elements to the right of the 2D output data.
/// @param kernel_height - kernel (filter) height.
/// @param kernel_width - kernel (filter) width.
/// @param upsampling_height - height of upsampling region for deconvolution input (deconvolution height stride).
/// @param upsampling_width - width of upsampling region for deconvolution input (deconvolution width stride).
/// @param dilation_height - dilation of kernel elements along the height dimension.
/// @param dilation_width - dilation of kernel elements along the width dimension.
/// @param groups - number of convolution groups.
/// @param group_input_channels - number of input channels per group.
/// @param group_output_channels - number of output channels per group.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
/// with [N, IH, IW, groups * group_input_channels] dimensions
/// @param filter_id - Value ID for the filter tensor. The filter tensor must be a 4D tensor defined in the @a subgraph
/// with [groups * group_output_channels, kernel_height, kernel_width, group_input_channels]
/// dimensions.
/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Convolution Node without a bias. If
/// present, the bias tensor must be a 1D tensor defined in the @a subgraph with
/// [groups * group_output_channels] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, OH, OW, groups * group_output_channels] dimensions.
/// @param flags - binary features of the 2D Deconvolution Node. No supported flags are currently defined.
enum xnn_status xnn_define_deconvolution_2d(
xnn_subgraph_t subgraph,
uint32_t padding_top,
uint32_t padding_right,
uint32_t padding_bottom,
uint32_t padding_left,
uint32_t adjustment_height,
uint32_t adjustment_width,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t upsampling_height,
uint32_t upsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
float output_min,
float output_max,
uint32_t input_id,
uint32_t filter_id,
uint32_t bias_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2D Depthwise Convolution Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING
/// flag is specified.
/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param kernel_height - kernel (filter) height.
/// @param kernel_width - kernel (filter) width.
/// @param subsampling_height - height of subsampling region for convolution output (convolution height stride).
/// @param subsampling_width - width of subsampling region for convolution output (convolution width stride).
/// @param dilation_height - dilation of kernel elements along the height dimension.
/// @param dilation_width - dilation of kernel elements along the width dimension.
/// @param depth_multiplier - ratio of output channels to input channels.
/// @param input_channels - number of input channels.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
/// with [N, IH, IW, input_channels] dimensions
/// @param filter_id - Value ID for the filter tensor. The filter tensor must be a 4D tensor defined in the @a subgraph
/// with [1, kernel_height, kernel_width, input_channels * depth_multiplier] dimensions.
/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Depthwise Convolution Node without
/// a bias. If present, the bias tensor must be a 1D tensor defined in the @a subgraph with
/// [input_channels * depth_multiplier] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, OH, OW, input_channels * depth_multiplier] dimensions.
/// @param flags - binary features of the 2D Depthwise Convolution Node. The only currently supported value is
/// XNN_FLAG_TENSORFLOW_SAME_PADDING.
enum xnn_status xnn_define_depthwise_convolution_2d(
xnn_subgraph_t subgraph,
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t depth_multiplier,
size_t input_channels,
float output_min,
float output_max,
uint32_t input_id,
uint32_t filter_id,
uint32_t bias_id,
uint32_t output_id,
uint32_t flags);
/// Define a Depth To Space 2D Node and add it to a Subgraph.
///
/// The Depth To Space 2D Node rearranges data from depth into blocks of spatial data (a reverse transform to
/// Space To Depth). For a given input pixel, an output square of pixels with side @a block_size is formed from values
/// in the corresponding number of its channels. The output depth is therefore @a block_size x @a block_size times
/// smaller than that of the input.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param block_size - the size of the spatial block.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
/// with [N, IH, IW, OC * block_size * block_size] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, IH * block_size, IW * block_size, OC] dimensions.
/// @param flags - binary features of the Depth To Space 2D Node. No supported flags are currently defined.
enum xnn_status xnn_define_depth_to_space_2d(
xnn_subgraph_t subgraph,
uint32_t block_size,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
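// Illustrative usage sketch (editor's addition, not part of the original
// header): with block_size = 2, an input of shape [1, 8, 8, 12] yields an
// output of shape [1, 16, 16, 3].
//
//   enum xnn_status status = xnn_define_depth_to_space_2d(
//       subgraph, /*block_size=*/2, input_id, output_id, /*flags=*/0);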
enum xnn_status xnn_define_depth_to_space(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t block_size,
uint32_t flags);
/// Define a 1D Global Average Pooling Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 2 or more dimensions
/// defined in the @a subgraph. Averaging is performed across the second-innermost dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 2 or more
/// dimensions defined in the @a subgraph.
/// @param flags - binary features of the 1D Global Average Pooling Node. The only currently supported value is
/// XNN_FLAG_KEEP_DIMS.
XNN_DEPRECATED enum xnn_status xnn_define_global_average_pooling_1d(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2D Global Average Pooling Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 3 or more dimensions
/// defined in the @a subgraph. Averaging is performed across the second- and third-innermost
/// dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 3 or more
/// dimensions defined in the @a subgraph.
/// @param flags - binary features of the 2D Global Average Pooling Node. The only currently supported value is
/// XNN_FLAG_KEEP_DIMS.
XNN_DEPRECATED enum xnn_status xnn_define_global_average_pooling_2d(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a 1D Global Sum Pooling Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 2 or more dimensions
///                    defined in the @a subgraph. Summation is performed across the second-innermost dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 2 or more
/// dimensions defined in the @a subgraph.
/// @param flags - binary features of the 1D Global Sum Pooling Node. The only currently supported value is
/// XNN_FLAG_KEEP_DIMS.
XNN_DEPRECATED enum xnn_status xnn_define_global_sum_pooling_1d(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2D Global Sum Pooling Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 3 or more dimensions
///                    defined in the @a subgraph. Summation is performed across the second- and third-innermost
/// dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 3 or more
/// dimensions defined in the @a subgraph.
/// @param flags - binary features of the 2D Global Sum Pooling Node. The only currently supported value is
/// XNN_FLAG_KEEP_DIMS.
XNN_DEPRECATED enum xnn_status xnn_define_global_sum_pooling_2d(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2D Average Pooling Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING
/// flag is specified.
/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param pooling_height - pooling (kernel) height.
/// @param pooling_width - pooling (kernel) width.
/// @param stride_height - displacement of the pooling window in the vertical dimension of the input pixels
///                         corresponding to vertically adjacent output pixels.
/// @param stride_width - displacement of the pooling window in the horizontal dimension of the input pixels
///                        corresponding to horizontally adjacent output pixels.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
///                    with [N, IH, IW, channels] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, OH, OW, channels] dimensions.
/// @param flags - binary features of the 2D Average Pooling Node. The only currently supported value is
/// XNN_FLAG_TENSORFLOW_SAME_PADDING.
enum xnn_status xnn_define_average_pooling_2d(
xnn_subgraph_t subgraph,
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
float output_min,
float output_max,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
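// Illustrative usage sketch (not part of the original header): a 2x2 average pooling
// with stride 2 and no padding. Assuming `subgraph`, `input_id` and `output_id`
// already exist, and using -INFINITY/+INFINITY (from <math.h>) to effectively
// disable output clipping:
//
//   enum xnn_status status = xnn_define_average_pooling_2d(
//       subgraph,
//       /*input_padding_top=*/0, /*input_padding_right=*/0,
//       /*input_padding_bottom=*/0, /*input_padding_left=*/0,
//       /*pooling_height=*/2, /*pooling_width=*/2,
//       /*stride_height=*/2, /*stride_width=*/2,
//       /*output_min=*/-INFINITY, /*output_max=*/INFINITY,
//       input_id, output_id, /*flags=*/0);
//   // For even IH and IW this maps an [N, IH, IW, C] input to an
//   // [N, IH/2, IW/2, C] output.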
/// Define a Fully Connected Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the
/// @a subgraph. If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the input tensor must be at least
/// 1D and its last dimension must match the last dimension of the filter tensor. In particular, if
/// input is a 2D tensor, it must have [batch_size, input_channels] dimensions.
/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, the number of elements in the input tensor must be
/// divisible by the input_channels. The tensor will be first flattened into a 1D tensor of
/// [num_input_elements] dimensions, then reshaped into a 2D tensor of
/// [num_input_elements / input_channels, input_channels] dimensions where num_input_elements is the
/// total number of elements in the input tensor.
/// @param filter_id - Value ID for the filter tensor. The filter tensor must be a 2D tensor defined in the @a subgraph.
/// If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is not specified, the filter tensor must have
/// [output_channels, input_channels] dimensions. If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is
/// specified, the filter tensor must have [input_channels, output_channels] dimensions.
/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a Fully Connected Node without a bias.
/// If present, the bias tensor must be a 1D tensor defined in the @a subgraph with [output_channels]
/// dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph.
/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the output tensor must have the same
/// dimensionality as the input tensor, all its dimensions but the last one must match the
///                     corresponding dimensions of the input tensor, and the last dimension of the output tensor must
/// match the first dimension of the filter tensor. In particular, if input is a 2D tensor, output
/// must be a 2D tensor of [batch_size, output_channels] dimensions.
/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, output must be a 2D tensor of
/// [num_input_elements / input_channels, output_channels] dimensions where num_input_elements is the
/// total number of elements in the input tensor.
/// @param flags - binary features of the Fully Connected Node. The only currently supported values are
/// XNN_FLAG_TENSORFLOW_RESHAPE_2D and XNN_FLAG_TRANSPOSE_WEIGHTS.
enum xnn_status xnn_define_fully_connected(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input_id,
uint32_t filter_id,
uint32_t bias_id,
uint32_t output_id,
uint32_t flags);
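// Illustrative usage sketch (not part of the original header): a plain
// [batch_size, input_channels] x [output_channels, input_channels] fully connected
// layer with bias and no activation clipping. `input_id`, `filter_id`, `bias_id`
// and `output_id` are assumed to have been defined earlier in the subgraph:
//
//   enum xnn_status status = xnn_define_fully_connected(
//       subgraph,
//       /*output_min=*/-INFINITY, /*output_max=*/INFINITY,
//       input_id, filter_id, bias_id, output_id, /*flags=*/0);
//   // Pass XNN_FLAG_TRANSPOSE_WEIGHTS instead of 0 if the filter is stored as
//   // [input_channels, output_channels].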
/// Define a Sparse Fully Connected Node and add it to a Subgraph.
///
/// This operator is experimental, and will be removed in the future.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the
/// @a subgraph. If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the input tensor must be at least
/// 1D and its last dimension must match the last dimension of the filter tensor. In particular, if
/// input is a 2D tensor, it must have [batch_size, input_channels] dimensions.
/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, the number of elements in the input tensor must be
/// divisible by the input_channels. The tensor will be first flattened into a 1D tensor of
/// [num_input_elements] dimensions, then reshaped into a 2D tensor of
/// [num_input_elements / input_channels, input_channels] dimensions where num_input_elements is the
/// total number of elements in the input tensor.
/// @param filter_id - Value ID for the filter tensor. The filter tensor must be a 2D tensor defined in the @a subgraph.
/// If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is not specified, the filter tensor must have
/// [output_channels, input_channels] dimensions. If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is
/// specified, the filter tensor must have [input_channels, output_channels] dimensions.
/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a Fully Connected Node without a bias.
/// If present, the bias tensor must be a 1D tensor defined in the @a subgraph with [output_channels]
/// dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph.
/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the output tensor must have the same
/// dimensionality as the input tensor, all its dimensions but the last one must match the
///                     corresponding dimensions of the input tensor, and the last dimension of the output tensor must
/// match the first dimension of the filter tensor. In particular, if input is a 2D tensor, output
/// must be a 2D tensor of [batch_size, output_channels] dimensions.
/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, output must be a 2D tensor of
/// [num_input_elements / input_channels, output_channels] dimensions where num_input_elements is the
/// total number of elements in the input tensor.
/// @param flags - binary features of the Sparse Fully Connected Node. The only currently supported values are
/// XNN_FLAG_TENSORFLOW_RESHAPE_2D and XNN_FLAG_TRANSPOSE_WEIGHTS.
enum xnn_status xnn_define_fully_connected_sparse(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input_id,
uint32_t filter_id,
uint32_t bias_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2D Max Pooling Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING
/// flag is specified.
/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if
/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
/// @param pooling_height - pooling (kernel) height.
/// @param pooling_width - pooling (kernel) width.
/// @param stride_height - displacement of the pooling window in the vertical dimension of the input pixels
///                         corresponding to vertically adjacent output pixels.
/// @param stride_width - displacement of the pooling window in the horizontal dimension of the input pixels
///                        corresponding to horizontally adjacent output pixels.
/// @param dilation_height - dilation of pooling elements along the height dimension.
/// @param dilation_width - dilation of pooling elements along the width dimension.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
///                    with [N, IH, IW, channels] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, OH, OW, channels] dimensions.
/// @param flags - binary features of the 2D Max Pooling Node. The only currently supported value is
/// XNN_FLAG_TENSORFLOW_SAME_PADDING.
enum xnn_status xnn_define_max_pooling_2d(
xnn_subgraph_t subgraph,
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
float output_min,
float output_max,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2D ArgMax Pooling Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_padding_top - implicit zero-padding above 2D input data.
/// @param input_padding_right - implicit zero-padding to the right of 2D input data.
/// @param input_padding_bottom - implicit zero-padding below 2D input data.
/// @param input_padding_left - implicit zero-padding to the left of 2D input data.
/// @param pooling_height - pooling (kernel) height. The vertical stride between pooling regions matches this value.
/// @param pooling_width - pooling (kernel) width. The horizontal stride between pooling regions matches this value.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
///                    with [N, IH, IW, channels] dimensions.
/// @param output_value_id - Value ID for the output tensor with the maximum values in the pools. The output tensor must
/// be a 4D tensor defined in the @a subgraph with [N, OH, OW, channels] dimensions.
/// @param output_index_id - Value ID for the output tensor with the indexes of the maximum values in the pools. The
/// output tensor must be a 4D tensor defined in the @a subgraph with [N, OH, OW, channels]
/// dimensions.
/// @param flags - binary features of the 2D ArgMax Pooling Node. No supported flags are currently defined.
enum xnn_status xnn_define_argmax_pooling_2d(
xnn_subgraph_t subgraph,
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t input_id,
uint32_t output_value_id,
uint32_t output_index_id,
uint32_t flags);
/// Define a 2D UnPooling Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param padding_top - implicit padding above 2D output data.
/// @param padding_right - implicit padding to the right of 2D output data.
/// @param padding_bottom - implicit padding below 2D output data.
/// @param padding_left - implicit padding to the left of 2D output data.
/// @param pooling_height - height of the pooling window.
/// @param pooling_width - width of the pooling window.
/// @param input_value_id - Value ID for the input tensor with the max-pooling values to invert. The input value tensor
/// must be a 4D tensor defined in the @a subgraph with [N, IH, IW, channels] dimensions.
/// @param input_index_id - Value ID for the input tensor with the indices of the per-pool maximum values produced by
///                         a 2D ArgMax Pooling Node. The input tensor must be a 4D tensor defined in the @a subgraph with
/// [N, IH, IW, channels] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, OH, OW, channels] dimensions.
/// @param flags - binary features of the 2D UnPooling Node. No supported flags are currently defined.
enum xnn_status xnn_define_unpooling_2d(
xnn_subgraph_t subgraph,
uint32_t padding_top,
uint32_t padding_right,
uint32_t padding_bottom,
uint32_t padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t input_value_id,
uint32_t input_index_id,
uint32_t output_id,
uint32_t flags);
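// Illustrative usage sketch (not part of the original header): a 2D ArgMax Pooling
// Node paired with a 2D UnPooling Node that inverts it. The index tensor produced
// by the first node (`index_id`) feeds the second node; all Value IDs are assumed
// to have been defined earlier in the subgraph:
//
//   enum xnn_status status = xnn_define_argmax_pooling_2d(
//       subgraph, /*padding=*/0, 0, 0, 0,
//       /*pooling_height=*/2, /*pooling_width=*/2,
//       input_id, pooled_id, index_id, /*flags=*/0);
//   if (status == xnn_status_success) {
//     status = xnn_define_unpooling_2d(
//         subgraph, /*padding=*/0, 0, 0, 0,
//         /*pooling_height=*/2, /*pooling_width=*/2,
//         pooled_id, index_id, unpooled_id, /*flags=*/0);
//   }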
enum xnn_binary_operator {
xnn_binary_invalid = -1,
xnn_binary_add,
xnn_binary_subtract,
xnn_binary_multiply,
xnn_binary_divide,
xnn_binary_maximum,
xnn_binary_minimum,
xnn_binary_copysign,
xnn_binary_squared_difference,
xnn_binary_prelu,
// The following operators are experimental and may be removed.
xnn_binary_modulus,
xnn_binary_atan2,
xnn_binary_pow,
xnn_binary_bitwise_and,
xnn_binary_bitwise_or,
xnn_binary_bitwise_xor,
xnn_binary_shift_left,
xnn_binary_shift_right_logical,
xnn_binary_shift_right_arithmetic,
};
struct xnn_binary_params {
/// lower bound for clipping output values.
double output_min;
/// upper bound for clipping output values.
double output_max;
};
/// Define a 2-Input binary operator Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param type - Type of operator to apply to the two inputs.
/// @param params - Optional parameters for the operator.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
/// of the two inputs.
/// @param flags - binary features of the Node. No supported flags are currently defined.
enum xnn_status xnn_define_binary(
xnn_subgraph_t subgraph,
enum xnn_binary_operator type,
const struct xnn_binary_params* params,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
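// Illustrative usage sketch (not part of the original header): elementwise addition
// via the generic binary node, which supersedes the deprecated xnn_define_add2
// below. `input1_id`, `input2_id` and `output_id` are assumed to exist already;
// the params struct disables output clipping:
//
//   struct xnn_binary_params params;
//   params.output_min = -INFINITY;
//   params.output_max = INFINITY;
//   enum xnn_status status = xnn_define_binary(
//       subgraph, xnn_binary_add, &params,
//       input1_id, input2_id, output_id, /*flags=*/0);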
/// Define a 2-Input Add Node and add it to a Subgraph.
///
/// The 2-Input Add Node computes elementwise addition of two tensor inputs with numpy broadcasting rules.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
/// of the two inputs.
/// @param flags - binary features of the Add Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_add2(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2-Input Multiply Node and add it to a Subgraph.
///
/// The 2-Input Multiply Node computes elementwise multiplication of two tensor inputs with numpy broadcasting rules.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
/// of the two inputs.
/// @param flags - binary features of the Multiply Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_multiply2(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
// Cap operations applied to logits (Q * K) of attention operator.
enum xnn_attention_logits_cap_type {
// No capping.
xnn_attention_logits_cap_type_none = 0,
// Cap the absolute values of logits by tanh: tanh(logits / cap) * cap
xnn_attention_logits_cap_type_tanh
};
// Params when the cap type is xnn_attention_logits_cap_type_tanh.
struct xnn_attention_logits_cap_tanh_params {
float cap;
};
/// Define a Scaled Dot-Product Attention Node and add it to a Subgraph.
///
/// This operator is experimental.
///
/// The Scaled Dot-Product Attention Node computes a multi-head or multi-query scaled dot attention on the query, key,
/// and value tensors.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param cap_type - type of cap to be applied to the logits.
/// @param cap_params - parameters for the cap. Must be a pointer to xnn_attention_logits_cap_tanh_params if cap_type
/// is xnn_attention_logits_cap_type_tanh.
/// @param query_id - Value ID for the query tensor. The query tensor must be a 3+-dimensional tensor defined in the
/// @a subgraph with the dimensions as [*, H, T, C], where H/T/C are the heads/tokens/channels, and *
/// is the 0 or more dimensions treated as batch size.
/// @param key_id - Value ID for the key tensor. The key tensor must be a 2+-dimensional tensor defined in the
///                  @a subgraph. It can have the same number of dimensions as the query, with the dimensions as
///                  [*, H, U, C] (multi-head), or have 1 less dimension than the query, with the dimensions as
///                  [*, U, C] (multi-query, number of heads omitted implies single head), where H/U/C are the
/// heads/key_value_tokens/channels, and * is the 0 or more dimensions treated as batch size. These
/// batch size dimensions must be the same as query.
/// @param value_id - Value ID for the value tensor. The value tensor must be a 2+-dimensional tensor defined in the
///                    @a subgraph. It can have the same number of dimensions as the query, with the dimensions as
///                    [*, H, U, D] (multi-head), or have 1 less dimension than the query, with the dimensions as
///                    [*, U, D] (multi-query, number of heads omitted implies single head), where H/U/D are the
/// heads/key_value_tokens/value_channels, and * is the 0 or more dimensions treated as batch size.
/// These batch size dimensions must be the same as query and key.
/// @param scale_id - Value ID for the scale tensor. The scale tensor must be a 1D tensor defined in the @a subgraph
/// with [C] dimensions. The query tensor is multiplied with this scale tensor before the dot product
/// with the key tensor.
/// @param mask_id - Value ID for the mask tensor. The mask tensor must be a 2D tensor defined in the @a subgraph with
///                  [T, U] dimensions. The mask tensor is added to the logits (query dot key).
/// @param output_id - Value ID for the output tensor. The output tensor must be a 3+-dimensional tensor defined in the
/// @a subgraph with the dimensions as [*, H, T, D], where H/T/D are the heads/tokens/value_channels,
/// and * is the 0 or more dimensions treated as batch size. These batch size dimensions must be the
/// same as query, key, and value.
/// @param flags - binary features of the Scaled Dot Product Attention Node. No supported flags are currently defined.
enum xnn_status xnn_define_scaled_dot_product_attention(
xnn_subgraph_t subgraph,
enum xnn_attention_logits_cap_type cap_type,
const void* cap_params,
uint32_t query_id,
uint32_t key_id,
uint32_t value_id,
uint32_t scale_id,
uint32_t mask_id,
uint32_t output_id,
uint32_t flags);
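// Illustrative usage sketch (not part of the original header): an uncapped attention
// node. `query_id`, `key_id`, `value_id`, `scale_id`, `mask_id` and `output_id` are
// assumed to have been defined earlier; passing NULL for cap_params when no cap is
// applied is an assumption, not something this header guarantees:
//
//   enum xnn_status status = xnn_define_scaled_dot_product_attention(
//       subgraph, xnn_attention_logits_cap_type_none, /*cap_params=*/NULL,
//       query_id, key_id, value_id, scale_id, mask_id, output_id,
//       /*flags=*/0);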
/// Define a Subtract Node and add it to a Subgraph.
///
/// The Subtract Node computes elementwise subtraction of two tensor inputs with numpy broadcasting rules.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
/// of the two inputs.
/// @param flags - binary features of the Subtract Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_subtract(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
/// Define a Divide Node and add it to a Subgraph.
///
/// The Divide Node computes elementwise division of two tensor inputs with numpy broadcasting rules.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
/// of the two inputs.
/// @param flags - binary features of the Divide Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_divide(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2-Input Maximum Node and add it to a Subgraph.
///
/// The 2-Input Maximum Node computes elementwise maximum of two tensor inputs with numpy broadcasting rules.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
/// of the two inputs.
/// @param flags - binary features of the Maximum Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_maximum2(
xnn_subgraph_t subgraph,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2-Input Minimum Node and add it to a Subgraph.
///
/// The 2-Input Minimum Node computes elementwise minimum of two tensor inputs with numpy broadcasting rules.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
/// of the two inputs.
/// @param flags - binary features of the Minimum Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_minimum2(
xnn_subgraph_t subgraph,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
/// Define a Squared Difference Node and add it to a Subgraph.
///
/// The Squared Difference Node computes elementwise squared difference of two tensor inputs with numpy broadcasting
/// rules.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
/// that dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
/// of the two inputs.
/// @param flags - binary features of the Squared Difference Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_squared_difference(
xnn_subgraph_t subgraph,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
/// Define a Constant Pad Node with static padding specification and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param pre_paddings - number of padding elements to insert before input elements for every dimension. This array
/// must have as many elements as the number of dimensions in the input tensor.
/// @param post_paddings - number of padding elements to insert after input elements for every dimension. This array
/// must have as many elements as the number of dimensions in the input tensor.
/// @param padding_value - constant value used to initialize padding elements.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor with padding.
/// @param flags - binary features of the Constant Pad Node. No supported flags are currently defined.
enum xnn_status xnn_define_static_constant_pad(
xnn_subgraph_t subgraph,
const size_t* pre_paddings,
const size_t* post_paddings,
float padding_value,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
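// Illustrative usage sketch (not part of the original header): zero-padding one
// element on each spatial side of an NHWC tensor, e.g. [1, 3, 3, 8] -> [1, 5, 5, 8].
// The padding arrays must have one entry per input dimension:
//
//   const size_t pre_paddings[4]  = {0, 1, 1, 0};
//   const size_t post_paddings[4] = {0, 1, 1, 0};
//   enum xnn_status status = xnn_define_static_constant_pad(
//       subgraph, pre_paddings, post_paddings, /*padding_value=*/0.0f,
//       input_id, output_id, /*flags=*/0);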
/// Define an Expand Dims Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param num_new_axes - number of new axes of size 1 to be inserted.
/// @param new_axes - The axis positions of the new axes in the expanded dimensions.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
///                     shape must match the shape of the input tensor with the new axes of size 1 inserted at the
///                     positions given by @a new_axes.
/// @param flags - binary features of the Expand Dims Node. No supported flags are currently defined.
enum xnn_status xnn_define_static_expand_dims(
xnn_subgraph_t subgraph,
size_t num_new_axes,
const size_t* new_axes,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
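// Illustrative usage sketch (not part of the original header): inserting a leading
// batch axis, turning a [4, 5] tensor into [1, 4, 5]:
//
//   const size_t new_axes[1] = {0};
//   enum xnn_status status = xnn_define_static_expand_dims(
//       subgraph, /*num_new_axes=*/1, new_axes, input_id, output_id,
//       /*flags=*/0);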
/// Define a Mean Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param num_reduction_axes - number of axes along which mean is computed.
/// @param reduction_axes - axes along which mean is computed.
/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with at least
/// @a num_reduction_axes dimensions defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor defined in the
/// @a subgraph with @a num_reduction_axes fewer dimensions than the input tensor (if
/// XNN_FLAG_KEEP_DIMS is not specified), or has same dimension rank but the dimension at
/// @a reduction_axes reduced to 1 (if XNN_FLAG_KEEP_DIMS is specified).
/// @param flags - binary features of the Mean Node. The only currently supported value is XNN_FLAG_KEEP_DIMS
XNN_DEPRECATED enum xnn_status xnn_define_static_mean(
xnn_subgraph_t subgraph,
size_t num_reduction_axes,
const size_t* reduction_axes,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
enum xnn_reduce_operator {
xnn_reduce_invalid = -1,
xnn_reduce_sum,
xnn_reduce_mean,
};
/// Define a Reduce Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param num_reduction_axes - number of axes along which reduce is computed.
/// @param reduction_axes - axes along which reduce is computed.
/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with at least
/// @a num_reduction_axes dimensions defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor defined in the
/// @a subgraph with @a num_reduction_axes fewer dimensions than the input tensor (if
/// XNN_FLAG_KEEP_DIMS is not specified), or has same dimension rank but the dimension at
/// @a reduction_axes reduced to 1 (if XNN_FLAG_KEEP_DIMS is specified).
/// @param flags - binary features of the Reduce Node. The only currently supported value is XNN_FLAG_KEEP_DIMS
enum xnn_status xnn_define_static_reduce(
xnn_subgraph_t subgraph,
enum xnn_reduce_operator reduce_operator_type,
size_t num_reduction_axes,
const size_t* reduction_axes,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
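// Illustrative usage sketch (not part of the original header): a spatial mean over
// the H and W axes of an [N, H, W, C] tensor. With XNN_FLAG_KEEP_DIMS the output
// shape is [N, 1, 1, C]; without the flag it would be [N, C]:
//
//   const size_t reduction_axes[2] = {1, 2};
//   enum xnn_status status = xnn_define_static_reduce(
//       subgraph, xnn_reduce_mean, /*num_reduction_axes=*/2, reduction_axes,
//       input_id, output_id, XNN_FLAG_KEEP_DIMS);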
/// Define a Reduce Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param num_reduction_axes - number of axes along which reduce is computed.
/// @param reduction_axes - axes along which reduce is computed. Negative values
///                         are interpreted as offsets from the number of
///                         dimensions of the input tensor.
/// @param input_id - Value ID for the input tensor. The input tensor must be a
/// dense tensor with at least @a num_reduction_axes
/// dimensions defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be
/// a dense tensor defined in the @a subgraph with @a
/// num_reduction_axes fewer dimensions than the input tensor
/// (if XNN_FLAG_KEEP_DIMS is not specified), or has same
/// dimension rank but the dimension at
/// @a reduction_axes reduced to 1 (if XNN_FLAG_KEEP_DIMS is
/// specified).
/// @param flags - binary features of the Reduce Node. The only currently
/// supported value is XNN_FLAG_KEEP_DIMS
enum xnn_status xnn_define_static_reduce_v2( //
xnn_subgraph_t subgraph, //
enum xnn_reduce_operator reduce_operator_type, //
size_t num_reduction_axes, //
const int64_t* reduction_axes, //
uint32_t input_id, //
uint32_t output_id, //
uint32_t flags);
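// Illustrative usage sketch (not part of the original header): summing over the
// last axis using a negative index, assuming negative axes count from the end of
// the input shape as described above:
//
//   const int64_t reduction_axes[1] = {-1};
//   enum xnn_status status = xnn_define_static_reduce_v2(
//       subgraph, xnn_reduce_sum, /*num_reduction_axes=*/1, reduction_axes,
//       input_id, output_id, /*flags=*/0);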
/// Define a 2-Input Concatenate Node and add it to a Subgraph.
///
/// The 2-Input Concatenate Node concatenates two tensors along a specified axis.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param axis - the axis to concatenate the two input tensors along. If this is less than zero, the number of
/// dimensions is added to it.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// second input.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// first input.
/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the dimension of both inputs, except the axis
/// dimension, where it is the sum of the corresponding dimensions of both inputs.
/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined.
enum xnn_status xnn_define_concatenate2(
xnn_subgraph_t subgraph,
int32_t axis,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
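// Illustrative usage sketch (not part of the original header): concatenating two
// NHWC tensors along the channel axis, e.g. [N, H, W, C1] and [N, H, W, C2]
// -> [N, H, W, C1 + C2]:
//
//   enum xnn_status status = xnn_define_concatenate2(
//       subgraph, /*axis=*/3, input1_id, input2_id, output_id, /*flags=*/0);
//   // axis = -1 would select the same (last) axis.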
/// Define a 3-Input Concatenate Node and add it to a Subgraph.
///
/// The 3-Input Concatenate Node concatenates three tensors along a specified axis.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param axis - the axis to concatenate the three input tensors along. If this is less than zero, the number of
/// dimensions is added to it.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param input3_id - Value ID for the third input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the dimension of all inputs, except the axis
/// dimension, where it is the sum of the corresponding dimensions of all inputs.
/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined.
enum xnn_status xnn_define_concatenate3(
xnn_subgraph_t subgraph,
int32_t axis,
uint32_t input1_id,
uint32_t input2_id,
uint32_t input3_id,
uint32_t output_id,
uint32_t flags);
/// Define a 4-Input Concatenate Node and add it to a Subgraph.
///
/// The 4-Input Concatenate Node concatenates four tensors along a specified axis.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param axis - the axis to concatenate the four input tensors along. If this is less than zero, the number of
/// dimensions is added to it.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param input3_id - Value ID for the third input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param input4_id - Value ID for the fourth input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
/// in the @a subgraph with each dimension equal to the dimension of all inputs, except the axis
/// dimension, where it is the sum of the corresponding dimensions of all inputs.
/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined.
enum xnn_status xnn_define_concatenate4(
xnn_subgraph_t subgraph,
int32_t axis,
uint32_t input1_id,
uint32_t input2_id,
uint32_t input3_id,
uint32_t input4_id,
uint32_t output_id,
uint32_t flags);
/// Define a 5-Input Concatenate Node and add it to a Subgraph.
///
/// The 5-Input Concatenate Node concatenates five tensors along a specified axis.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param axis - the axis to concatenate the five input tensors along. If this is less than zero, the number of
/// dimensions is added to it.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param input3_id - Value ID for the third input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param input4_id - Value ID for the fourth input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param input5_id - Value ID for the fifth input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
/// other inputs.
/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
///                     in the @a subgraph with each dimension equal to the dimension of all inputs, except the axis
///                     dimension, where it is the sum of the corresponding dimensions of all inputs.
/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined.
enum xnn_status xnn_define_concatenate5(
xnn_subgraph_t subgraph,
int32_t axis,
uint32_t input1_id,
uint32_t input2_id,
uint32_t input3_id,
uint32_t input4_id,
uint32_t input5_id,
uint32_t output_id,
uint32_t flags);
/// Define a Copy Sign Node and add it to a Subgraph.
///
/// The Copy Sign Node copies the sign of the second input to the first input.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be defined in the @a subgraph.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor.
/// @param flags - binary features of the Copy Sign Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_copysign(
xnn_subgraph_t subgraph,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
/// Define a Copy Node and add it to a Subgraph.
///
/// The Copy Node copies an input tensor to an output tensor.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the first input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Copy Node. No supported flags are currently defined.
enum xnn_status xnn_define_copy(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a 2-Output Split Node and add it to a Subgraph.
///
/// The 2-Output Split Node splits an input tensor into two output tensors along a specified axis evenly.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param split_dim - the dimension to split the input tensor along. If this is less than zero, the number of
/// dimensions is added to it.
/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a
/// subgraph.
/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined
/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension
/// of the second output. The split_dim dimension is half of the input's split_dim.
/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor
/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
/// dimension of the first output. The split_dim dimension is half of the input's split_dim.
/// @param flags - binary features of the Split Node. No supported flags are currently defined.
enum xnn_status xnn_define_even_split2(
xnn_subgraph_t subgraph,
int32_t split_dim,
uint32_t input_id,
uint32_t output1_id,
uint32_t output2_id,
uint32_t flags);
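// Illustrative usage sketch (not part of the original header): splitting an
// [N, H, W, 2*C] tensor into two [N, H, W, C] halves along the channel axis:
//
//   enum xnn_status status = xnn_define_even_split2(
//       subgraph, /*split_dim=*/3, input_id, output1_id, output2_id,
//       /*flags=*/0);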
/// Define a 3-Output Split Node and add it to a Subgraph.
///
/// The 3-Output Split Node splits an input tensor into three output tensors along a specified axis evenly.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param split_dim - the dimension to split the input tensor along. If this is less than zero, the number of
/// dimensions is added to it.
/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a
/// subgraph.
/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined
/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension
/// of the second and third output. The split_dim dimension is one third of the input's split_dim.
/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor
/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
/// dimension of the first and third output. The split_dim dimension is one third of the input's
/// split_dim.
/// @param output3_id - Value ID for the third output tensor. The output tensor must be an N-dimensional tensor
/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
/// dimension of the second and third output. The split_dim dimension is one third of the input's
///                     dimension of the first and second output. The split_dim dimension is one third of the input's
/// @param flags - binary features of the Split Node. No supported flags are currently defined.
enum xnn_status xnn_define_even_split3(
xnn_subgraph_t subgraph,
int32_t split_dim,
uint32_t input_id,
uint32_t output1_id,
uint32_t output2_id,
uint32_t output3_id,
uint32_t flags);
/// Define a 4-Output Split Node and add it to a Subgraph.
///
/// The 4-Output Split Node splits an input tensor into four output tensors along a specified axis evenly.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param split_dim - the dimension to split the input tensor along. If this is less than zero, the number of
/// dimensions is added to it.
/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a
/// subgraph.
/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined
/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension
/// of the other output tensors. The split_dim dimension is one fourth of the input's split_dim.
/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor
/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's
/// split_dim.
/// @param output3_id - Value ID for the third output tensor. The output tensor must be an N-dimensional tensor
/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's
/// split_dim.
/// @param output4_id - Value ID for the fourth output tensor. The output tensor must be an N-dimensional tensor
/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's
/// split_dim.
/// @param flags - binary features of the Split Node. No supported flags are currently defined.
enum xnn_status xnn_define_even_split4(
xnn_subgraph_t subgraph,
int32_t split_dim,
uint32_t input_id,
uint32_t output1_id,
uint32_t output2_id,
uint32_t output3_id,
uint32_t output4_id,
uint32_t flags);
/// Define a Reshape Node with static shape specification and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param num_dims - number of shape dimensions in the output tensor.
/// @param new_shape - shape dimensions of the output tensor.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
///                     shape must match @a new_shape, with the same total number of elements as the input tensor.
/// @param flags - binary features of the Reshape Node. No supported flags are currently defined.
enum xnn_status xnn_define_static_reshape(
xnn_subgraph_t subgraph,
size_t num_dims,
const size_t* new_shape,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
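// Illustrative usage sketch (not part of the original header): flattening the
// spatial dimensions of a [1, 4, 4, 8] tensor into [1, 16, 8]. The element count
// must be preserved:
//
//   const size_t new_shape[3] = {1, 16, 8};
//   enum xnn_status status = xnn_define_static_reshape(
//       subgraph, /*num_dims=*/3, new_shape, input_id, output_id,
//       /*flags=*/0);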
/// Define a 2D Resize Bilinear Node with static output height & width specification and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param new_height - height dimension of the output tensor.
/// @param new_width - width dimension of the output tensor.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
/// with [N, H, W, C] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, new_height, new_width, C] dimensions.
/// @param flags - binary features of the 2D Resize Bilinear Node. The only currently supported values are
/// XNN_FLAG_TENSORFLOW_LEGACY_MODE and XNN_FLAG_ALIGN_CORNERS, which are mutually exclusive.
enum xnn_status xnn_define_static_resize_bilinear_2d(
xnn_subgraph_t subgraph,
size_t new_height,
size_t new_width,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a PReLU (Parametric ReLU) Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
/// with [N, H, W, channels] dimensions.
/// @param slope_id - Value ID for the slope tensor. The slope tensor must be a 1D tensor defined in the @a subgraph with
/// either [1] or [channels] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, H, W, channels] dimensions.
/// @param flags - binary features of the PReLU Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_prelu(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t slope_id,
uint32_t output_id,
uint32_t flags);
/// Define a RoPE (Rotary Positional Embeddings) Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param max_sequence_size - deprecated.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
/// with [batch, tokens, heads, channels] dimensions.
/// @param weights_id - Value ID for the weights tensor. The weights tensor must be a 2D tensor defined in the
/// @a subgraph with [max_tokens, channels] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [batch, tokens, heads, channels] dimensions.
/// @param flags - binary features of the RoPE Node. No supported flags are currently defined.
enum xnn_status xnn_define_rope(
xnn_subgraph_t subgraph,
size_t max_sequence_size,
uint32_t input_id,
uint32_t weights_id,
uint32_t output_id,
uint32_t flags);
/// Define an Abs Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Abs Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_abs(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Bankers' Rounding Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Bankers' Rounding Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_bankers_rounding(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Batch Matrix Multiply Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph. It must be at least 3D. The first N-2 dimensions must match the second input
/// tensor. The last 2 dimensions are [M, K]. If XNN_FLAG_TRANSPOSE_B is not specified, the last
/// dimension must match the second last dimension of the second input tensor. If
/// XNN_FLAG_TRANSPOSE_B is specified, the last dimension must match the last dimension of the
/// second input tensor.
/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined
/// in the @a subgraph. It must be at least 3D. The first N-2 dimensions must match the first input
/// tensor. If XNN_FLAG_TRANSPOSE_B is not specified, the last 2 dimensions are [K, N], and the
/// second last dimension must match the last dimension of the first input tensor. If
/// XNN_FLAG_TRANSPOSE_B is specified, the last 2 dimensions are [N, K], and the last dimension must
/// match the last dimension of the first input tensor.
/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined in the
/// @a subgraph. It must be at least 3D. The first N-2 dimensions must match the first and second
/// input tensors. The last 2 dimensions must be [M, N].
/// @param flags - binary features of the Batch Matrix Multiply Node. The only currently supported value is
/// XNN_FLAG_TRANSPOSE_B.
enum xnn_status xnn_define_batch_matrix_multiply(
xnn_subgraph_t subgraph,
uint32_t input1_id,
uint32_t input2_id,
uint32_t output_id,
uint32_t flags);
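// Illustrative sketch (not part of the XNNPACK header): the shape contract of the Batch Matrix Multiply Node. With
// flags == 0 it computes [..., M, K] x [..., K, N] -> [..., M, N]; with XNN_FLAG_TRANSPOSE_B the second input is
// laid out as [..., N, K] instead. The helper name is hypothetical and the three Values are assumed to be defined
// on the Subgraph with compatible shapes.
static inline enum xnn_status example_add_batch_matmul_node(
  xnn_subgraph_t subgraph,
  uint32_t a_id,    // [..., M, K]
  uint32_t b_id,    // [..., K, N], or [..., N, K] when transpose_b is true
  uint32_t out_id,  // [..., M, N]
  bool transpose_b)
{
  const uint32_t flags = transpose_b ? XNN_FLAG_TRANSPOSE_B : 0;
  return xnn_define_batch_matrix_multiply(subgraph, a_id, b_id, out_id, flags);
}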
/// Define a Ceiling Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Ceiling Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_ceiling(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Clamp Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param output_min - lower bound for clipping output values.
/// @param output_max - upper bound for clipping output values.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Clamp Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_clamp(
xnn_subgraph_t subgraph,
float output_min,
float output_max,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define an ELU (Exponential Linear Unit) Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param alpha - scale factor for negative output elements.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the ELU Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_elu(
xnn_subgraph_t subgraph,
float alpha,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define an Exp Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Exp Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_exp(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Floor Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Floor Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_floor(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a GELU (Gaussian Error Linear Unit) Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the GELU Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_gelu(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a HardSwish Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the HardSwish Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_hardswish(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Leaky ReLU Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param negative_slope - scale factor for negative input elements.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Leaky ReLU Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_leaky_relu(
xnn_subgraph_t subgraph,
float negative_slope,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Log Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Log Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_log(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Negate Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Negate Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_negate(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Sigmoid Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Sigmoid Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_sigmoid(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a SoftMax Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph, and have at
/// least one dimension.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the SoftMax Node. No supported flags are currently defined.
enum xnn_status xnn_define_softmax(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Space To Depth 2D Node and add it to a Subgraph.
///
/// The Space To Depth 2D Node rearranges blocks of spatial data into the channel dimension (the inverse transform of
/// Depth To Space 2D). For a given output pixel, its values are gathered from the corresponding square of input
/// pixels with side @a block_size and stacked along the channel dimension. The output depth is therefore
/// @a block_size x @a block_size times greater than that of the input.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param block_size - the size of the spatial block.
/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
/// with [N, IH * block_size, IW * block_size, OC] dimensions.
/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
/// with [N, IH, IW, OC * block_size * block_size] dimensions.
/// @param flags - binary features of the Space To Depth 2D Node. No supported flags are currently defined.
enum xnn_status xnn_define_space_to_depth_2d(
xnn_subgraph_t subgraph,
uint32_t block_size,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
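// Illustrative sketch (not part of the XNNPACK header): the shape relationship enforced by the Space To Depth 2D
// Node. For an input of [N, IH * block_size, IW * block_size, C] the output is
// [N, IH, IW, C * block_size * block_size]. The struct and helper names are hypothetical.
struct example_nhwc_shape {
  size_t n, h, w, c;
};
static inline struct example_nhwc_shape example_space_to_depth_output_shape(
  struct example_nhwc_shape input, uint32_t block_size)
{
  struct example_nhwc_shape output;
  output.n = input.n;
  output.h = input.h / block_size;               // spatial extent shrinks by block_size
  output.w = input.w / block_size;
  output.c = input.c * block_size * block_size;  // channel count grows by block_size^2
  return output;
}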
/// Define a Square Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Square Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_square(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Square Root Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Square Root Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_square_root(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Reciprocal Square Root Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Reciprocal Square Root Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_reciprocal_square_root(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
enum xnn_status xnn_define_static_slice(
xnn_subgraph_t subgraph,
size_t num_dims,
const size_t* offsets,
const size_t* sizes,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Define a Static Slice Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param num_dims - number of shape dimensions in the input and output tensor.
/// @param offsets - offsets in each dimension of the input tensor. This array must have @a num_dims elements. An
/// offset may be negative, meaning that it is counted from the end of the corresponding dimension.
/// @param sizes - size of each dimension in output tensor. This array must have @a num_dims elements.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// dimensions must match @a sizes.
/// @param flags - binary features of the Static Slice Node. No supported flags are currently defined.
enum xnn_status xnn_define_static_slice_v2( //
xnn_subgraph_t subgraph, //
size_t num_dims, //
const int64_t* offsets, //
const size_t* sizes, //
uint32_t input_id, //
uint32_t output_id, //
uint32_t flags);
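// Illustrative sketch (not part of the XNNPACK header): using a negative offset with xnn_define_static_slice_v2.
// For a 3D input of shape [B, T, C], the slice below keeps the last `tail` elements of the final dimension; per
// the documentation above, the negative offset is resolved relative to the end of that dimension. The helper name
// is hypothetical and the input/output Values are assumed to be defined on the Subgraph already.
static inline enum xnn_status example_slice_last_channels(
  xnn_subgraph_t subgraph,
  uint32_t input_id,   // shape [B, T, C]
  uint32_t output_id,  // shape [B, T, tail]
  size_t b, size_t t, size_t tail)
{
  const int64_t offsets[3] = {0, 0, -(int64_t) tail};  // negative offset counts from the end of the dimension
  const size_t sizes[3] = {b, t, tail};
  return xnn_define_static_slice_v2(subgraph, 3, offsets, sizes, input_id, output_id, /*flags=*/0);
}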
/// Define a Static Transpose Node and add it to a Subgraph.
///
/// The Static Transpose Node applies a generalized transpose to the input tensor using the permutation given in @a perm.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in
/// the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
/// in the @a subgraph with each dimension equal to its corresponding permuted input dimension.
/// @param num_dims - the number of permutation dimensions. This must be equal to the number of input dimensions.
/// @param perm - the permutation of the axes of the input tensor. The perm array must contain the values 0 to N-1 in
/// the permuted order.
/// @param flags - binary features of the Static Transpose Node. No supported flags are currently defined.
enum xnn_status xnn_define_static_transpose(
xnn_subgraph_t subgraph,
size_t num_dims,
const size_t* perm,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
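// Illustrative sketch (not part of the XNNPACK header): a common use of the Static Transpose Node, converting an
// NHWC tensor to NCHW. The example assumes the usual convention that output axis i is taken from input axis
// perm[i], so perm = {0, 3, 1, 2} maps [N, H, W, C] -> [N, C, H, W]. The helper name is hypothetical.
static inline enum xnn_status example_add_nhwc_to_nchw_transpose(
  xnn_subgraph_t subgraph,
  uint32_t input_id,   // 4D [N, H, W, C]
  uint32_t output_id)  // 4D [N, C, H, W]
{
  const size_t perm[4] = {0, 3, 1, 2};
  return xnn_define_static_transpose(subgraph, 4, perm, input_id, output_id, /*flags=*/0);
}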
/// Define a Tanh Node and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
/// shape must match the shape of the input tensor.
/// @param flags - binary features of the Tanh Node. No supported flags are currently defined.
XNN_DEPRECATED enum xnn_status xnn_define_tanh(
xnn_subgraph_t subgraph,
uint32_t input_id,
uint32_t output_id,
uint32_t flags);
/// Code cache is a cache for JIT generated code.
typedef struct xnn_code_cache* xnn_code_cache_t;
/// Weights cache can be finalized in these ways:
enum xnn_weights_cache_finalization_kind {
/// Weights cache is finalized; no insert operations into the weights cache are allowed, even if the "inserted"
/// weights already exist in the cache. Weights cache memory will also be trimmed to a page boundary and set to
/// read-only (to prevent writes).
xnn_weights_cache_finalization_kind_hard,
/// Weights cache will be finalized with some extra space at the end; this allows "inserting" into the cache only
/// if the weights are already in the cache, and errors out on inserting uncached weights. There is some memory
/// overhead.
xnn_weights_cache_finalization_kind_soft,
};
/// A combination of multiple factors to uniquely locate the weights cache.
struct xnn_weights_cache_look_up_key {
/// The unique seed for each ukernel. It is guaranteed that each ukernel provides
/// a consistent and identical seed.
uint32_t seed;
/// Pointer to the original kernel.
const void* kernel;
/// Pointer to the original bias, could be NULL.
const void* bias;
};
/// A group of function pointers to manage the weights cache. All functions may be
/// called from multiple threads.
struct xnn_weights_cache_provider {
/// User-specified pointer that will be passed as-is to all functions in this
/// structure.
void* context;
/// Looks up the tuple of {cache_key, kernel, bias} in the cache. If it is found,
/// returns the offset to the found entry for reuse. Otherwise, returns SIZE_MAX.
/// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
/// @param cache_key - The key used to locate the weights cache entry.
size_t (*look_up)(void* context, const struct xnn_weights_cache_look_up_key* cache_key);
/// Ensures that the cache has enough space for `n` bytes. Returns the address at which to
/// store the packed weights, or NULL if the space could not be reserved.
/// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
/// @param n - size to be reserved.
void* (*reserve_space)(void* context, size_t n);
/// Looks up the packed weights at `ptr` in the cache. If found, they are reused;
/// otherwise, they are added to the cache. Returns the offset into the cache.
/// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
/// @param cache_key - The key used to locate the weights cache entry.
/// @param ptr - pointer pointing to the packed weight.
/// @param size - size of the packed weight.
size_t (*look_up_or_insert)(void* context, const struct xnn_weights_cache_look_up_key* cache_key, void* ptr, size_t size);
/// Returns whether the cache is finalized.
/// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
bool (*is_finalized)(void* context);
/// Returns the absolute pointer corresponding to `offset`, where the offset was returned from
/// `look_up` or `look_up_or_insert`. This function must be called after the cache is finalized.
/// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
/// @param offset - offset to the start of internal buffer
void* (*offset_to_addr)(void* context, size_t offset);
/// Destroy a weights cache object, as well as memory used for the cache.
/// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
enum xnn_status (*delete_cache)(void* context);
};
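// Illustrative sketch (not part of the XNNPACK header): how a caller of the xnn_weights_cache_provider contract
// might combine the function pointers above. On a cache miss (SIZE_MAX) space is reserved, the weights are packed
// into it (elided here) and registered with look_up_or_insert(). The helper name is hypothetical.
static inline size_t example_get_or_pack_weights_offset(
  struct xnn_weights_cache_provider* provider,
  const struct xnn_weights_cache_look_up_key* key,
  size_t packed_size)
{
  size_t offset = provider->look_up(provider->context, key);
  if (offset == SIZE_MAX) {
    void* space = provider->reserve_space(provider->context, packed_size);
    if (space == NULL) {
      return SIZE_MAX;
    }
    // ... pack the weights referenced by `key` into `space` (elided) ...
    offset = provider->look_up_or_insert(provider->context, key, space, packed_size);
  }
  // Per the documentation above, the offset can be converted back into an address only after finalization:
  //   void* addr = provider->offset_to_addr(provider->context, offset);
  return offset;
}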
/// Weights cache is a cache for packed weights. It can be reused between runtimes.
typedef struct xnn_weights_cache_provider* xnn_weights_cache_t;
/// Create a weights cache object specifying the initial size of weights cache (in bytes).
///
/// @param[in] size - initial capacity of the weights cache (in bytes), i.e. it can hold size bytes without growing.
/// @param weights_cache_out - pointer to the variable that will be initialized to a handle to the weights cache provider
/// upon successful return. Once created, the weights cache provider can be shared between
/// different Runtime objects.
enum xnn_status xnn_create_weights_cache_with_size(size_t size, xnn_weights_cache_t* weights_cache_out);
enum xnn_status xnn_create_weights_cache(xnn_weights_cache_t* weights_cache_out);
/// Finalizes the weights cache. The kind of finalization is specified by `finalization_kind`.
/// @param weights_cache - the weights cache object to finalize.
/// @param finalization_kind - the kind of finalization.
enum xnn_status xnn_finalize_weights_cache(
xnn_weights_cache_t weights_cache,
enum xnn_weights_cache_finalization_kind finalization_kind);
// Wrapper around the corresponding function pointer in `xnn_weights_cache_t`.
bool xnn_weights_cache_is_finalized(xnn_weights_cache_t cache);
/// Destroy a weights cache object, as well as memory used for the cache.
/// @param weights_cache - the weights cache object to destroy.
enum xnn_status xnn_delete_weights_cache(xnn_weights_cache_t weights_cache);
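// Illustrative sketch (not part of the XNNPACK header): lifecycle of the built-in weights cache. The cache is
// created before the Runtimes that should share packed weights, finalized once those Runtimes have been created
// (hard finalization forbids further inserts and makes the memory read-only, per the enum above), and deleted
// after the Runtimes are destroyed. The helper name and the 1 MiB initial size are arbitrary choices.
static inline enum xnn_status example_weights_cache_lifecycle(void)
{
  xnn_weights_cache_t cache = NULL;
  enum xnn_status status = xnn_create_weights_cache_with_size(1 << 20, &cache);
  if (status != xnn_status_success) {
    return status;
  }
  // ... create every Runtime that should share this cache (elided) ...
  status = xnn_finalize_weights_cache(cache, xnn_weights_cache_finalization_kind_hard);
  if (status != xnn_status_success) {
    xnn_delete_weights_cache(cache);
    return status;
  }
  (void) xnn_weights_cache_is_finalized(cache);  // reports the state tracked by the cache provider
  // ... run inference with the Runtimes, then destroy them (elided) ...
  return xnn_delete_weights_cache(cache);
}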
typedef struct xnn_workspace* xnn_workspace_t;
/// Create a workspace object.
/// @param workspace_out - pointer to the variable that will be initialized to a handle to the workspace object upon
/// successful return. Once created, the workspace can be shared between different Runtime
/// objects.
enum xnn_status xnn_create_workspace(xnn_workspace_t* workspace_out);
/// Destroy a workspace object, as well as memory used by the workspace. Object destruction can be deferred until all
/// Runtime objects created with this workspace are destroyed.
/// @param workspace - the workspace object to destroy.
enum xnn_status xnn_release_workspace(xnn_workspace_t workspace);
/// Runtime is a combination of an execution plan for subgraph Nodes and a memory manager for subgraph Values.
typedef struct xnn_runtime* xnn_runtime_t;
enum xnn_profile_info {
/// Returns a size_t containing the number of operators.
xnn_profile_info_num_operators,
/// Returns a char[] containing the null-character-separated names of all operators.
xnn_profile_info_operator_name,
/// Returns a uint64_t[] with the runtimes of all operators in the same order as xnn_profile_info_operator_name.
xnn_profile_info_operator_timing,
};
/// Return profile information for all operators.
///
/// @param runtime - a Runtime object created with @ref xnn_create_runtime, @ref xnn_create_runtime_v2 or
/// @ref xnn_create_runtime_v3.
/// @param param_name - type of profile information required.
/// @param param_value_size - the size in bytes of memory pointed to by param_value. If this is not sufficient then
/// param_value_size_ret will be set to the required size and xnn_status_out_of_memory will be
/// returned.
/// @param param_value - a pointer to memory location where appropriate values for a given param_value will be written.
/// @param param_value_size_ret - returns number of bytes required to write the result if param_value_size is not
/// sufficient.
enum xnn_status xnn_get_runtime_profiling_info(xnn_runtime_t runtime,
enum xnn_profile_info param_name,
size_t param_value_size,
void* param_value,
size_t* param_value_size_ret);
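// Illustrative sketch (not part of the XNNPACK header): querying profiling data. The operator count is read into a
// size_t, then the null-separated operator names are fetched into a buffer; if the buffer is too small the call
// reports xnn_status_out_of_memory and writes the required size, as documented above. The helper name and the
// fixed 4 KiB buffer are choices made for this example.
static inline void example_query_profiling_info(xnn_runtime_t runtime)
{
  size_t num_operators = 0;
  size_t size_ret = 0;
  if (xnn_get_runtime_profiling_info(runtime, xnn_profile_info_num_operators,
                                     sizeof(num_operators), &num_operators, &size_ret) != xnn_status_success) {
    return;
  }
  char names[4096];
  size_t required = 0;
  enum xnn_status status = xnn_get_runtime_profiling_info(
      runtime, xnn_profile_info_operator_name, sizeof(names), names, &required);
  // On xnn_status_out_of_memory, `required` holds the buffer size a retry would need.
  (void) num_operators;
  (void) status;
}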
/// Create a Runtime object from a subgraph.
///
/// @param subgraph - a Subgraph object with all Values and Nodes that would be handled by the runtime. No Values or
/// Nodes can be added to the runtime once it is constructed.
/// @param weights_cache - a cache for packed weights. The runtime will look up and reuse packed weights in this cache,
/// this will reduce memory allocated for packed weights.
/// @param workspace - a workspace to hold internal tensors. The runtime will allocate space used for internal tensors
/// and track them using workspace. Workspace can be shared and reused across different runtimes. If
/// workspace is NULL, there will be no sharing: each runtime has its own workspace.
/// @param threadpool - the thread pool to be used for parallelization of computations in the runtime. If the thread
/// pool is NULL, the computation would run on the caller thread without parallelization.
/// @param flags - binary features of the runtime. The only currently supported values are
/// XNN_FLAG_HINT_SPARSE_INFERENCE, XNN_FLAG_HINT_FP16_INFERENCE, XNN_FLAG_FORCE_FP16_INFERENCE,
/// XNN_FLAG_YIELD_WORKERS, and XNN_FLAG_TRANSIENT_INDIRECTION_BUFFER. If XNN_FLAG_YIELD_WORKERS is
/// specified, worker threads would be yielded to the system scheduler after processing the last operator
/// in the Runtime. If XNN_FLAG_TRANSIENT_INDIRECTION_BUFFER is specified, convolution operators will
/// initialize indirection buffers on each inference run using temporary memory in the workspace, instead
/// of initializing persistent indirection buffers once.
/// @param runtime_out - pointer to the variable that will be initialized with a handle to the Runtime object upon
/// successful return. Once constructed, the Runtime object is independent of the Subgraph object
/// used to create it.
enum xnn_status xnn_create_runtime_v4(
xnn_subgraph_t subgraph,
xnn_weights_cache_t weights_cache,
xnn_workspace_t workspace,
pthreadpool_t threadpool,
uint32_t flags,
xnn_runtime_t* runtime_out);
enum xnn_status xnn_create_runtime_v3(
xnn_subgraph_t subgraph,
xnn_weights_cache_t weights_cache,
pthreadpool_t threadpool,
uint32_t flags,
xnn_runtime_t* runtime_out);
enum xnn_status xnn_create_runtime_v2(
xnn_subgraph_t subgraph,
pthreadpool_t threadpool,
uint32_t flags,
xnn_runtime_t* runtime_out);
enum xnn_status xnn_create_runtime(
xnn_subgraph_t subgraph,
xnn_runtime_t* runtime_out);
struct xnn_external_value {
uint32_t id;
void* data;
};
/// Reshape an external value.
///
/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on
/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be
/// created for the Value.
/// @param num_dims - number of dimensions in the shape.
/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
/// XNNPACK does not keep any pointers to this array after the function returns.
enum xnn_status xnn_reshape_external_value(
xnn_runtime_t runtime,
uint32_t external_id,
size_t num_dims,
const size_t* dims);
/// Get the external value shape.
///
/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on
/// the Subgraph creation. The external ID cannot be XNN_INVALID_VALUE_ID.
/// @param num_dims - a valid pointer into which the number of dimensions in the shape will be written. The written
/// value will not be larger than XNN_MAX_TENSOR_DIMS.
/// @param dims - pointer to an array into which @a num_dims shape dimensions will be written. This pointer cannot be
/// NULL and must be large enough to hold at least @a num_dims elements. XNNPACK does not keep any pointers to this
/// array after the function returns.
enum xnn_status xnn_get_external_value_shape(
xnn_runtime_t runtime,
uint32_t external_id,
size_t* num_dims,
size_t* dims);
/// Reshape the XNNPACK runtime.
///
/// Propagates the shapes of input tensors through the graph to determine the shapes of intermediate and output tensors.
/// Memory is allocated if required. Output tensor shapes are returned by xnn_get_external_value_shape.
///
/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2.
enum xnn_status xnn_reshape_runtime(
xnn_runtime_t runtime);
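// Illustrative sketch (not part of the XNNPACK header): propagating a new input shape through a Runtime. The input
// Value is reshaped first, xnn_reshape_runtime() propagates shapes (allocating memory if required), and the
// resulting output shape is read back. The helper name is hypothetical; `input_id` and `output_id` are external
// Value IDs reserved at Subgraph creation.
static inline enum xnn_status example_reshape_and_query_output(
  xnn_runtime_t runtime,
  uint32_t input_id,
  uint32_t output_id,
  size_t num_input_dims,
  const size_t* input_dims,
  size_t* num_output_dims,                  // out: number of output dimensions
  size_t output_dims[XNN_MAX_TENSOR_DIMS])  // out: output shape
{
  enum xnn_status status = xnn_reshape_external_value(runtime, input_id, num_input_dims, input_dims);
  if (status != xnn_status_success) {
    return status;
  }
  status = xnn_reshape_runtime(runtime);
  if (status != xnn_status_success) {
    return status;
  }
  return xnn_get_external_value_shape(runtime, output_id, num_output_dims, output_dims);
}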
/// Deprecated. Use xnn_reshape_runtime and xnn_setup_runtime_v2.
///
/// Setup data pointers for external inputs and outputs in a Runtime object and
/// allocate memory.
///
/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2.
/// @param num_external_values - the number of external inputs and outputs specified in this call. This number must
/// match the number of external inputs and outputs in the runtime, i.e. all external
/// inputs and outputs in the runtime must be specified in one call.
/// @param external_values - array with location information for all external inputs and outputs in the runtime.
enum xnn_status xnn_setup_runtime(
xnn_runtime_t runtime,
size_t num_external_values,
const struct xnn_external_value* external_values);
/// Setup data pointers for external inputs and outputs in a Runtime object.
/// Should be called after xnn_reshape_runtime.
///
/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2.
/// @param num_external_values - the number of external inputs and outputs specified in this call. This number must
/// match the number of external inputs and outputs in the runtime, i.e. all external
/// inputs and outputs in the runtime must be specified in one call.
/// @param external_values - array with location information for all external inputs and outputs in the runtime.
enum xnn_status xnn_setup_runtime_v2(
xnn_runtime_t runtime,
size_t num_external_values,
const struct xnn_external_value* external_values);
/// Execute forward pass for all operators in the runtime.
///
/// @param runtime - the Runtime object with the execution plan to invoke.
enum xnn_status xnn_invoke_runtime(
xnn_runtime_t runtime);
/// Destroy a Runtime object, as well as operators and memory associated with it.
///
/// @param runtime - the Runtime object to destroy.
enum xnn_status xnn_delete_runtime(
xnn_runtime_t runtime);
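// Illustrative sketch (not part of the XNNPACK header): the reshape/setup/invoke flow described above for a graph
// with one external input and one external output. Weights cache, workspace and threadpool are optional and passed
// as NULL; the external Values are assumed to have been defined with complete shapes when the Subgraph was built.
// The helper name is hypothetical.
static inline enum xnn_status example_run_subgraph_once(
  xnn_subgraph_t subgraph,
  uint32_t input_id, void* input_data,
  uint32_t output_id, void* output_data)
{
  xnn_runtime_t runtime = NULL;
  enum xnn_status status = xnn_create_runtime_v4(
      subgraph, /*weights_cache=*/NULL, /*workspace=*/NULL, /*threadpool=*/NULL, /*flags=*/0, &runtime);
  if (status != xnn_status_success) {
    return status;
  }
  // Propagate shapes and allocate memory, then bind the external tensors and execute.
  status = xnn_reshape_runtime(runtime);
  if (status == xnn_status_success) {
    const struct xnn_external_value externals[2] = {
      {input_id, input_data},
      {output_id, output_data},
    };
    status = xnn_setup_runtime_v2(runtime, 2, externals);
  }
  if (status == xnn_status_success) {
    status = xnn_invoke_runtime(runtime);
  }
  xnn_delete_runtime(runtime);
  return status;
}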
typedef struct xnn_operator* xnn_operator_t;
enum xnn_status xnn_run_operator(
xnn_operator_t op,
pthreadpool_t threadpool);
enum xnn_status xnn_delete_operator(
xnn_operator_t op);
/// Operator API:
/// - create operator will create and populate an xnn_operator_t
/// - reshape operator will update fields in xnn_operator_t with shape/dimensions and parallelization information
/// - setup operator will update pointers to input and outputs
/// Each supported operator must have a create, reshape, and setup function. (Optionally a run function.)
/// Operators listed below are in alphabetical order by operator name; within each operator, we sort alphabetically by
/// data layout and type. We also group create, reshape, setup (and optionally run) functions of each operator together.
enum xnn_status xnn_create_binary_elementwise_nd(
enum xnn_binary_operator type,
enum xnn_datatype datatype,
const struct xnn_quantization_params* input1_quantization,
const struct xnn_quantization_params* input2_quantization,
const struct xnn_quantization_params* output_quantization,
uint32_t flags,
xnn_operator_t* binary_op_out);
enum xnn_status xnn_reshape_binary_elementwise_nd(
xnn_operator_t binary_op,
size_t num_input1_dims,
const size_t* input1_shape,
size_t num_input2_dims,
const size_t* input2_shape,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_binary_elementwise_nd(
xnn_operator_t binary_op,
const void* input1,
const void* input2,
void* output);
enum xnn_status xnn_run_binary_elementwise_nd(
enum xnn_binary_operator type,
enum xnn_datatype datatype,
const struct xnn_quantization_params* input1_quantization,
const struct xnn_quantization_params* input2_quantization,
const struct xnn_quantization_params* output_quantization,
uint32_t flags,
size_t num_input1_dims,
const size_t* input1_shape,
size_t num_input2_dims,
const size_t* input2_shape,
const void* input1,
const void* input2,
void* output,
pthreadpool_t threadpool);
enum xnn_status xnn_create_unary_elementwise_nc(
enum xnn_unary_operator op_type,
enum xnn_datatype input_datatype,
enum xnn_datatype output_datatype,
const union xnn_unary_params* params,
const struct xnn_quantization_params* input_quantization,
const struct xnn_quantization_params* output_quantization,
uint32_t flags,
xnn_operator_t* op_out);
enum xnn_status xnn_reshape_unary_elementwise_nc(
xnn_operator_t op,
size_t batch_size,
size_t channels,
size_t input_stride,
size_t output_stride,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_unary_elementwise_nc(
xnn_operator_t op,
const void* input,
void* output);
enum xnn_status xnn_run_unary_elementwise_nc(
// create parameters
enum xnn_unary_operator op_type,
enum xnn_datatype input_datatype,
enum xnn_datatype output_datatype,
const union xnn_unary_params* params,
const struct xnn_quantization_params* input_quantization,
const struct xnn_quantization_params* output_quantization,
uint32_t flags,
// reshape parameters
size_t batch_size,
size_t channels,
size_t input_stride,
size_t output_stride,
pthreadpool_t threadpool,
// setup parameters
const void* input,
void* output);
enum xnn_status xnn_create_argmax_pooling2d_nhwc_f32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t flags,
xnn_operator_t* argmax_pooling_op_out);
enum xnn_status xnn_reshape_argmax_pooling2d_nhwc_f32(
xnn_operator_t argmax_pooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_argmax_pooling2d_nhwc_f32(
xnn_operator_t argmax_pooling_op,
void* workspace,
const float* input,
float* output,
uint32_t* index);
enum xnn_status xnn_create_average_pooling2d_nhwc_f16(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
float output_min,
float output_max,
uint32_t flags,
xnn_operator_t* average_pooling_op_out);
enum xnn_status xnn_reshape_average_pooling2d_nhwc_f16(
xnn_operator_t average_pooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_average_pooling2d_nhwc_f16(
xnn_operator_t average_pooling_op,
void* workspace,
const void* input,
void* output);
enum xnn_status xnn_create_average_pooling2d_nhwc_f32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
float output_min,
float output_max,
uint32_t flags,
xnn_operator_t* average_pooling_op_out);
enum xnn_status xnn_reshape_average_pooling2d_nhwc_f32(
xnn_operator_t average_pooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_average_pooling2d_nhwc_f32(
xnn_operator_t average_pooling_op,
void* workspace,
const float* input,
float* output);
enum xnn_status xnn_create_average_pooling2d_nhwc_qu8(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
uint8_t input_zero_point,
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
xnn_operator_t* average_pooling_op_out);
enum xnn_status xnn_reshape_average_pooling2d_nhwc_qu8(
xnn_operator_t average_pooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_average_pooling2d_nhwc_qu8(
xnn_operator_t average_pooling_op,
void* workspace,
const uint8_t* input,
uint8_t* output);
enum xnn_status xnn_create_batch_matrix_multiply_nc_f16(
uint32_t flags,
xnn_operator_t* batch_matrix_multiply_op);
enum xnn_status xnn_reshape_batch_matrix_multiply_nc_f16(
xnn_operator_t batch_matrix_multiply_op, size_t num_batch_dims,
const size_t* batch_dims_a, const size_t* batch_dims_b, size_t m, size_t k,
size_t n, size_t* workspace_size, size_t* workspace_alignment,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_batch_matrix_multiply_nc_f16(
xnn_operator_t batch_matrix_multiply_op, void* workspace,
const void* input_a, const void* input_b, void* output);
enum xnn_status xnn_create_batch_matrix_multiply_nc_f32(
uint32_t flags, xnn_operator_t* batch_matrix_multiply_op);
enum xnn_status xnn_create_batch_matrix_multiply_nc_f32_const_weights(
size_t batch_size_b, size_t k, size_t n, const float* data_b,
uint32_t flags, xnn_operator_t* batch_matrix_multiply_op);
enum xnn_status xnn_reshape_batch_matrix_multiply_nc_f32(
xnn_operator_t batch_matrix_multiply_op, size_t num_batch_dims,
const size_t* batch_dims_a, const size_t* batch_dims_b, size_t m, size_t k,
size_t n, size_t* workspace_size, size_t* workspace_alignment,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_batch_matrix_multiply_nc_f32(
xnn_operator_t batch_matrix_multiply_op, void* workspace,
const float* input_a, const float* input_b, float* output);
enum xnn_status xnn_create_batch_matrix_multiply_nc_qd8_f32_qc8w(
size_t batch_size_b, size_t k, size_t n, const int8_t* data_b,
const float* scale_b, uint32_t flags,
xnn_operator_t* batch_matrix_multiply_op);
enum xnn_status xnn_reshape_batch_matrix_multiply_nc_qd8_f32_qc8w(
xnn_operator_t batch_matrix_multiply_op, size_t num_batch_dims,
const size_t* batch_dims_a, const size_t* batch_dims_b, size_t m, size_t k,
size_t n, pthreadpool_t threadpool);
enum xnn_status xnn_setup_batch_matrix_multiply_nc_qd8_f32_qc8w(
xnn_operator_t batch_matrix_multiply_op, const int8_t* input_a,
const struct xnn_quantization_params* quantization_params,
float* output);
enum xnn_status xnn_create_channel_shuffle_nc_x8(
size_t groups,
size_t group_channels,
size_t input_stride,
size_t output_stride,
uint32_t flags,
xnn_operator_t* channel_shuffle_op_out);
enum xnn_status xnn_reshape_channel_shuffle_nc_x8(
xnn_operator_t channel_shuffle_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_channel_shuffle_nc_x8(
xnn_operator_t channel_shuffle_op,
const void* input,
void* output);
enum xnn_status xnn_create_channel_shuffle_nc_x32(
size_t groups,
size_t group_channels,
size_t input_stride,
size_t output_stride,
uint32_t flags,
xnn_operator_t* channel_shuffle_op_out);
enum xnn_status xnn_reshape_channel_shuffle_nc_x32(
xnn_operator_t channel_shuffle_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_channel_shuffle_nc_x32(
xnn_operator_t channel_shuffle_op,
const void* input,
void* output);
enum xnn_status xnn_create_constant_pad_nd_x8(
const void* padding_value,
uint32_t flags,
xnn_operator_t* constant_pad_op_out);
enum xnn_status xnn_reshape_constant_pad_nd_x8(
xnn_operator_t constant_pad_op,
size_t num_dims,
const size_t* input_shape,
const size_t* pre_padding,
const size_t* post_padding,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_constant_pad_nd_x8(
xnn_operator_t constant_pad_op,
const void* input,
void* output);
enum xnn_status xnn_run_constant_pad_nd_x8(
uint32_t flags,
size_t num_dims,
const size_t* input_shape,
const size_t* pre_paddings,
const size_t* post_paddings,
const void* input,
void* output,
const void* padding_value,
pthreadpool_t threadpool);
enum xnn_status xnn_create_constant_pad_nd_x16(
const void* padding_value,
uint32_t flags,
xnn_operator_t* constant_pad_op_out);
enum xnn_status xnn_reshape_constant_pad_nd_x16(
xnn_operator_t constant_pad_op,
size_t num_dims,
const size_t* input_shape,
const size_t* pre_padding,
const size_t* post_padding,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_constant_pad_nd_x16(
xnn_operator_t constant_pad_op,
const void* input,
void* output);
enum xnn_status xnn_run_constant_pad_nd_x16(
uint32_t flags,
size_t num_dims,
const size_t* input_shape,
const size_t* pre_paddings,
const size_t* post_paddings,
const void* input,
void* output,
const void* padding_value,
pthreadpool_t threadpool);
enum xnn_status xnn_create_constant_pad_nd_x32(
const void* padding_value,
uint32_t flags,
xnn_operator_t* constant_pad_op_out);
enum xnn_status xnn_reshape_constant_pad_nd_x32(
xnn_operator_t constant_pad_op,
size_t num_dims,
const size_t* input_shape,
const size_t* pre_padding,
const size_t* post_padding,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_constant_pad_nd_x32(
xnn_operator_t constant_pad_op,
const void* input,
void* output);
enum xnn_status xnn_run_constant_pad_nd_x32(
uint32_t flags,
size_t num_dims,
const size_t* input_shape,
const size_t* pre_paddings,
const size_t* post_paddings,
const void* input,
void* output,
const void* padding_value,
pthreadpool_t threadpool);
enum xnn_status xnn_create_convert_nc_f16_qd8(
uint32_t flags,
xnn_operator_t* convert_op_out);
enum xnn_status xnn_reshape_convert_nc_f16_qd8(
xnn_operator_t convert_op,
size_t batch_size,
size_t channels,
size_t input_stride,
size_t output_stride,
pthreadpool_t threadpool);
// quantization_params must be padded with at least XNN_EXTRA_QUANTIZATION_PARAMS entries.
enum xnn_status xnn_setup_convert_nc_f16_qd8(
xnn_operator_t convert_op,
const void* input,
int8_t* output,
struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_create_convert_nc_f32_qd8(
uint32_t flags,
xnn_operator_t* convert_op_out);
enum xnn_status xnn_reshape_convert_nc_f32_qd8(
xnn_operator_t convert_op,
size_t batch_size,
size_t channels,
size_t input_stride,
size_t output_stride,
pthreadpool_t threadpool);
// quantization_params must be padded with at least XNN_EXTRA_QUANTIZATION_PARAMS entries.
enum xnn_status xnn_setup_convert_nc_f32_qd8(
xnn_operator_t convert_op,
const float* input,
int8_t* output,
struct xnn_quantization_params* quantization_params);
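// Illustrative sketch (not part of the XNNPACK header): dynamically quantizing f32 rows to qd8. As noted in the
// comment above, `quantization_params` must provide batch_size + XNN_EXTRA_QUANTIZATION_PARAMS entries. The helper
// name is hypothetical; input and output are assumed to be dense (strides equal to `channels`).
static inline enum xnn_status example_convert_f32_to_qd8(
  const float* input,                                    // batch_size x channels, contiguous
  int8_t* output,                                        // batch_size x channels, contiguous
  struct xnn_quantization_params* quantization_params,   // batch_size + XNN_EXTRA_QUANTIZATION_PARAMS entries
  size_t batch_size,
  size_t channels,
  pthreadpool_t threadpool)
{
  xnn_operator_t convert_op = NULL;
  enum xnn_status status = xnn_create_convert_nc_f32_qd8(/*flags=*/0, &convert_op);
  if (status == xnn_status_success) {
    status = xnn_reshape_convert_nc_f32_qd8(
        convert_op, batch_size, channels, /*input_stride=*/channels, /*output_stride=*/channels, threadpool);
  }
  if (status == xnn_status_success) {
    status = xnn_setup_convert_nc_f32_qd8(convert_op, input, output, quantization_params);
  }
  if (status == xnn_status_success) {
    status = xnn_run_operator(convert_op, threadpool);
  }
  if (convert_op != NULL) {
    xnn_delete_operator(convert_op);
  }
  return status;
}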
XNN_DEPRECATED enum xnn_status xnn_run_convert_nc_f32_f16(
size_t channels,
size_t input_stride,
size_t output_stride,
size_t batch_size,
const float* input,
void* output,
uint32_t flags,
pthreadpool_t threadpool);
enum xnn_status xnn_create_convolution2d_nchw_f16(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const void* kernel,
const void* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out);
enum xnn_status xnn_reshape_convolution2d_nchw_f16(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_convolution2d_nchw_f16(
xnn_operator_t convolution_op,
const void* input,
void* output);
enum xnn_status xnn_create_convolution2d_nchw_f32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const float* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out);
enum xnn_status xnn_reshape_convolution2d_nchw_f32(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_convolution2d_nchw_f32(
xnn_operator_t convolution_op,
const float* input,
float* output);
enum xnn_status xnn_create_convolution2d_nhwc_f16(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const void* kernel,
const void* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out);
enum xnn_status xnn_reshape_convolution2d_nhwc_f16(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_convolution2d_nhwc_f16(
xnn_operator_t convolution_op,
void* workspace,
const void* input,
void* output);
enum xnn_status xnn_create_convolution2d_nhwc_f32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const float* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out);
enum xnn_status xnn_create_convolution2d_nhwc_f32_f16(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const void* kernel,
const void* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out);
// Forward declare.
struct xnn_post_operation;
/// Deprecated
enum xnn_status xnn_create_fused_convolution2d_nhwc_f32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const float* kernel,
const float* bias,
size_t num_post_operations,
struct xnn_post_operation* post_operations,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out);
enum xnn_status xnn_reshape_convolution2d_nhwc_f32(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_convolution2d_nhwc_f32(
xnn_operator_t convolution_op,
void* workspace,
const float* input,
float* output);
enum xnn_status xnn_create_convolution2d_nhwc_qd8_f16_qc8w(
uint32_t input_padding_top, uint32_t input_padding_right,
uint32_t input_padding_bottom, uint32_t input_padding_left,
uint32_t kernel_height, uint32_t kernel_width, uint32_t subsampling_height,
uint32_t subsampling_width, uint32_t dilation_height,
uint32_t dilation_width, uint32_t groups, size_t group_input_channels,
size_t group_output_channels, size_t input_channel_stride,
size_t output_channel_stride, const float* kernel_scale,
const int8_t* kernel, const float* bias, float output_min, float output_max,
uint32_t flags, xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache, xnn_operator_t* convolution_op_out);
enum xnn_status xnn_create_convolution2d_nhwc_qd8_f32_qc8w(
uint32_t input_padding_top, uint32_t input_padding_right,
uint32_t input_padding_bottom, uint32_t input_padding_left,
uint32_t kernel_height, uint32_t kernel_width, uint32_t subsampling_height,
uint32_t subsampling_width, uint32_t dilation_height,
uint32_t dilation_width, uint32_t groups, size_t group_input_channels,
size_t group_output_channels, size_t input_channel_stride,
size_t output_channel_stride, const float* kernel_scale,
const int8_t* kernel, const float* bias, float output_min, float output_max,
uint32_t flags, xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache, xnn_operator_t* convolution_op_out);
enum xnn_status xnn_create_convolution2d_nhwc_qs8(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
int8_t input_zero_point,
float input_scale,
float kernel_scale,
const int8_t* kernel,
const int32_t* bias,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out);
enum xnn_status xnn_reshape_convolution2d_nhwc_qd8_f16_qc8w(
xnn_operator_t convolution_op, size_t batch_size, size_t input_height,
size_t input_width, size_t* workspace_size, size_t* workspace_alignment,
size_t* output_height_out, size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_reshape_convolution2d_nhwc_qd8_f32_qc8w(
xnn_operator_t convolution_op, size_t batch_size, size_t input_height,
size_t input_width, size_t* workspace_size, size_t* workspace_alignment,
size_t* output_height_out, size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_reshape_convolution2d_nhwc_qs8(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_convolution2d_nhwc_qd8_f16_qc8w(
xnn_operator_t convolution_op, void* workspace, const int8_t* input,
void* output,
const struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_setup_convolution2d_nhwc_qd8_f32_qc8w(
xnn_operator_t convolution_op, void* workspace, const int8_t* input,
float* output,
const struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_setup_convolution2d_nhwc_qs8(
xnn_operator_t convolution_op,
void* workspace,
const int8_t* input,
int8_t* output);
enum xnn_status xnn_create_convolution2d_nhwc_qs8_qc8w(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
int8_t input_zero_point,
float input_scale,
const float* kernel_scale,
const int8_t* kernel,
const int32_t* bias,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out);
enum xnn_status xnn_reshape_convolution2d_nhwc_qs8_qc8w(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_convolution2d_nhwc_qs8_qc8w(
xnn_operator_t convolution_op,
void* workspace,
const int8_t* input,
int8_t* output);
enum xnn_status xnn_create_convolution2d_nhwc_qu8(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
uint8_t input_zero_point,
float input_scale,
uint8_t kernel_zero_point,
float kernel_scale,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out);
enum xnn_status xnn_reshape_convolution2d_nhwc_qu8(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_convolution2d_nhwc_qu8(
xnn_operator_t convolution_op,
void* workspace,
const uint8_t* input,
uint8_t* output);
enum xnn_status xnn_create_copy_nc_x8(
uint32_t flags,
xnn_operator_t* copy_op_out);
enum xnn_status xnn_reshape_copy_nc_x8(
xnn_operator_t copy_op,
size_t batch_size,
size_t channels,
size_t input_stride,
size_t output_stride,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_copy_nc_x8(
xnn_operator_t copy_op,
const void* input,
void* output);
enum xnn_status xnn_create_copy_nc_x16(
uint32_t flags,
xnn_operator_t* copy_op_out);
enum xnn_status xnn_reshape_copy_nc_x16(
xnn_operator_t copy_op,
size_t batch_size,
size_t channels,
size_t input_stride,
size_t output_stride,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_copy_nc_x16(
xnn_operator_t copy_op,
const void* input,
void* output);
enum xnn_status xnn_create_copy_nc_x32(
uint32_t flags,
xnn_operator_t* copy_op_out);
enum xnn_status xnn_reshape_copy_nc_x32(
xnn_operator_t copy_op,
size_t batch_size,
size_t channels,
size_t input_stride,
size_t output_stride,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_copy_nc_x32(
xnn_operator_t copy_op,
const void* input,
void* output);
enum xnn_status xnn_run_copy_nc_x32(
size_t channels,
size_t input_stride,
size_t output_stride,
size_t batch_size,
const uint32_t* input,
uint32_t* output,
uint32_t flags,
pthreadpool_t threadpool);
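// Illustrative sketch (not part of the XNNPACK header): the create/reshape/setup/run lifecycle from the Operator
// API overview earlier in this header, using the x32 Copy operator because it needs no extra parameters. The
// helper name is hypothetical; dense data is assumed (input and output strides equal to `channels`).
static inline enum xnn_status example_copy_x32(
  const uint32_t* input,
  uint32_t* output,
  size_t batch_size,
  size_t channels,
  pthreadpool_t threadpool)
{
  xnn_operator_t copy_op = NULL;
  enum xnn_status status = xnn_create_copy_nc_x32(/*flags=*/0, &copy_op);
  if (status == xnn_status_success) {
    status = xnn_reshape_copy_nc_x32(
        copy_op, batch_size, channels, /*input_stride=*/channels, /*output_stride=*/channels, threadpool);
  }
  if (status == xnn_status_success) {
    status = xnn_setup_copy_nc_x32(copy_op, input, output);
  }
  if (status == xnn_status_success) {
    status = xnn_run_operator(copy_op, threadpool);
  }
  if (copy_op != NULL) {
    xnn_delete_operator(copy_op);
  }
  return status;
}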
enum xnn_status xnn_create_deconvolution2d_nhwc_f16(
uint32_t output_padding_top,
uint32_t output_padding_right,
uint32_t output_padding_bottom,
uint32_t output_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
const void* kernel,
const void* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* deconvolution_op_out);
enum xnn_status xnn_reshape_deconvolution2d_nhwc_f16(
xnn_operator_t deconvolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
uint32_t adjustment_height,
uint32_t adjustment_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_deconvolution2d_nhwc_f16(
xnn_operator_t deconvolution_op,
const void* input,
void* output);
enum xnn_status xnn_create_deconvolution2d_nhwc_f32(
uint32_t output_padding_top,
uint32_t output_padding_right,
uint32_t output_padding_bottom,
uint32_t output_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
const float* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* deconvolution_op_out);
enum xnn_status xnn_create_deconvolution2d_nhwc_f32_f16(
uint32_t output_padding_top,
uint32_t output_padding_right,
uint32_t output_padding_bottom,
uint32_t output_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
const void* kernel,
const void* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* deconvolution_op_out);
enum xnn_status xnn_reshape_deconvolution2d_nhwc_f32(
xnn_operator_t deconvolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
uint32_t adjustment_height,
uint32_t adjustment_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_deconvolution2d_nhwc_f32(
xnn_operator_t deconvolution_op,
const float* input,
float* output);
enum xnn_status xnn_create_deconvolution2d_nhwc_qd8_f32_qc8w(
uint32_t output_padding_top,
uint32_t output_padding_right,
uint32_t output_padding_bottom,
uint32_t output_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
const float* kernel_scale,
const int8_t* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* deconvolution_op_out);
enum xnn_status xnn_reshape_deconvolution2d_nhwc_qd8_f32_qc8w(
xnn_operator_t deconvolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
uint32_t adjustment_height,
uint32_t adjustment_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_deconvolution2d_nhwc_qd8_f32_qc8w(
xnn_operator_t deconvolution_op,
const int8_t* input,
float* output,
const struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_create_deconvolution2d_nhwc_qs8(
uint32_t output_padding_top,
uint32_t output_padding_right,
uint32_t output_padding_bottom,
uint32_t output_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
int8_t input_zero_point,
float input_scale,
float kernel_scale,
const int8_t* kernel,
const int32_t* bias,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* deconvolution_op_out);
enum xnn_status xnn_reshape_deconvolution2d_nhwc_qs8(
xnn_operator_t deconvolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
uint32_t adjustment_height,
uint32_t adjustment_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_deconvolution2d_nhwc_qs8(
xnn_operator_t deconvolution_op,
const int8_t* input,
int8_t* output);
enum xnn_status xnn_create_deconvolution2d_nhwc_qs8_qc8w(
uint32_t output_padding_top,
uint32_t output_padding_right,
uint32_t output_padding_bottom,
uint32_t output_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
int8_t input_zero_point,
float input_scale,
const float* kernel_scale,
const int8_t* kernel,
const int32_t* bias,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* deconvolution_op_out);
enum xnn_status xnn_reshape_deconvolution2d_nhwc_qs8_qc8w(
xnn_operator_t deconvolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
uint32_t adjustment_height,
uint32_t adjustment_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_deconvolution2d_nhwc_qs8_qc8w(
xnn_operator_t deconvolution_op,
const int8_t* input,
int8_t* output);
enum xnn_status xnn_create_deconvolution2d_nhwc_qu8(
uint32_t output_padding_top,
uint32_t output_padding_right,
uint32_t output_padding_bottom,
uint32_t output_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint8_t input_zero_point,
float input_scale,
uint8_t kernel_zero_point,
float kernel_scale,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* deconvolution_op_out);
enum xnn_status xnn_reshape_deconvolution2d_nhwc_qu8(
xnn_operator_t deconvolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
uint32_t adjustment_height,
uint32_t adjustment_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_deconvolution2d_nhwc_qu8(
xnn_operator_t deconvolution_op,
const uint8_t* input,
uint8_t* output);
enum xnn_status xnn_create_depth_to_space_nchw2nhwc_x16(
uint32_t block_size,
uint32_t flags,
xnn_operator_t* depth_to_space_op_out);
enum xnn_status xnn_reshape_depth_to_space_nchw2nhwc_x16(
xnn_operator_t depth_to_space_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t input_channels,
size_t* output_height_out,
size_t* output_width_out,
size_t* output_channels_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_depth_to_space_nchw2nhwc_x16(
xnn_operator_t depth_to_space_op,
const void* input,
void* output);
enum xnn_status xnn_create_depth_to_space_nchw2nhwc_x32(
uint32_t block_size,
uint32_t flags,
xnn_operator_t* depth_to_space_op_out);
enum xnn_status xnn_reshape_depth_to_space_nchw2nhwc_x32(
xnn_operator_t depth_to_space_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t input_channels,
size_t* output_height_out,
size_t* output_width_out,
size_t* output_channels_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_depth_to_space_nchw2nhwc_x32(
xnn_operator_t depth_to_space_op,
const void* input,
void* output);
enum xnn_status xnn_create_depth_to_space_nhwc_x8(
uint32_t block_size,
uint32_t flags,
xnn_operator_t* depth_to_space_op_out);
enum xnn_status xnn_reshape_depth_to_space_nhwc_x8(
xnn_operator_t depth_to_space_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t input_channels,
size_t* output_height_out,
size_t* output_width_out,
size_t* output_channels_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_depth_to_space_nhwc_x8(
xnn_operator_t depth_to_space_op,
const void* input,
void* output);
enum xnn_status xnn_create_depth_to_space_nhwc_x16(
uint32_t block_size,
uint32_t flags,
xnn_operator_t* depth_to_space_op_out);
enum xnn_status xnn_reshape_depth_to_space_nhwc_x16(
xnn_operator_t depth_to_space_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t input_channels,
size_t* output_height_out,
size_t* output_width_out,
size_t* output_channels_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_depth_to_space_nhwc_x16(
xnn_operator_t depth_to_space_op,
const void* input,
void* output);
enum xnn_status xnn_create_depth_to_space_nhwc_x32(
uint32_t block_size,
uint32_t flags,
xnn_operator_t* depth_to_space_op_out);
enum xnn_status xnn_reshape_depth_to_space_nhwc_x32(
xnn_operator_t depth_to_space_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t input_channels,
size_t* output_height_out,
size_t* output_width_out,
size_t* output_channels_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_depth_to_space_nhwc_x32(
xnn_operator_t depth_to_space_op,
const void* input,
void* output);
enum xnn_status xnn_create_dynamic_fully_connected_nc_f16(
float output_min,
float output_max,
uint32_t flags,
xnn_operator_t* dynamic_fully_connected_op_out);
enum xnn_status xnn_reshape_dynamic_fully_connected_nc_f16(
xnn_operator_t dynamic_fully_connected_op,
size_t batch_size,
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
size_t* workspace_size,
size_t* workspace_alignment,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_dynamic_fully_connected_nc_f16(
xnn_operator_t dynamic_fully_connected_op,
void* workspace,
const void* input,
const void* kernel,
const void* bias,
void* output);
enum xnn_status xnn_create_dynamic_fully_connected_nc_f32(
float output_min,
float output_max,
uint32_t flags,
xnn_operator_t* dynamic_fully_connected_op_out);
enum xnn_status xnn_reshape_dynamic_fully_connected_nc_f32(
xnn_operator_t dynamic_fully_connected_op,
size_t batch_size,
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
size_t* workspace_size,
size_t* workspace_alignment,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_dynamic_fully_connected_nc_f32(
xnn_operator_t dynamic_fully_connected_op,
void* workspace,
const float* input,
const float* kernel,
const float* bias,
float* output);
enum xnn_status xnn_create_fully_connected_nc_f16(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
const void* kernel,
const void* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_reshape_fully_connected_nc_f16(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_fully_connected_nc_f16(
xnn_operator_t fully_connected_op,
const void* input,
void* output);
enum xnn_status xnn_create_fully_connected_nc_f32_f16(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
const void* kernel,
const void* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_create_fully_connected_nc_f32(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
const float* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_reshape_fully_connected_nc_f32_f16(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_reshape_fully_connected_nc_f32(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_fully_connected_nc_f32_f16(
xnn_operator_t fully_connected_op,
const float* input,
float* output);
enum xnn_status xnn_setup_fully_connected_nc_f32(
xnn_operator_t fully_connected_op,
const float* input,
float* output);
enum xnn_status xnn_create_fully_connected_nc_f32_qc4w(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
uint8_t kernel_zero_point,
const float* kernel_scale,
const uint8_t* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_reshape_fully_connected_nc_f32_qc4w(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_fully_connected_nc_f32_qc4w(
xnn_operator_t fully_connected_op,
const float* input,
float* output);
enum xnn_status xnn_create_fully_connected_nc_f32_qc8w(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
const float* kernel_scale,
const int8_t* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_reshape_fully_connected_nc_f32_qc8w(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_fully_connected_nc_f32_qc8w(
xnn_operator_t fully_connected_op,
const float* input,
float* output);
enum xnn_status xnn_create_fully_connected_nc_qd8_f16_qc4w(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
uint8_t kernel_zero_point,
const float* kernel_scale,
const void* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_setup_fully_connected_nc_qd8_f16_qc4w(
xnn_operator_t fully_connected_op,
const int8_t* input,
void* output,
const struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_reshape_fully_connected_nc_qd8_f16_qc4w(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_create_fully_connected_nc_qd8_f16_qb4w(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
size_t block_size,
uint8_t kernel_zero_point,
const uint16_t* kernel_scale,
const void* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_reshape_fully_connected_nc_qd8_f16_qb4w(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_fully_connected_nc_qd8_f16_qb4w(
xnn_operator_t fully_connected_op,
const int8_t* input,
void* output,
const struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_create_fully_connected_nc_qd8_f32_qc4w(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
uint8_t kernel_zero_point,
const float* kernel_scale,
const void* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_setup_fully_connected_nc_qd8_f32_qc4w(
xnn_operator_t fully_connected_op,
const int8_t* input,
float* output,
const struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_reshape_fully_connected_nc_qd8_f32_qc4w(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_create_fully_connected_nc_qd8_f32_qb4w(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
size_t block_size,
uint8_t kernel_zero_point,
const uint16_t* kernel_scale,
const void* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_reshape_fully_connected_nc_qd8_f32_qb4w(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_fully_connected_nc_qd8_f32_qb4w(
xnn_operator_t fully_connected_op,
const int8_t* input,
float* output,
const struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_create_fully_connected_nc_qd8_f16_qc8w(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
const float* kernel_scale,
const int8_t* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_setup_fully_connected_nc_qd8_f16_qc8w(
xnn_operator_t fully_connected_op,
const int8_t* input,
void* output,
const struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_reshape_fully_connected_nc_qd8_f16_qc8w(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_create_fully_connected_nc_qd8_f32_qc8w(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
const float* kernel_scale,
const int8_t* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_setup_fully_connected_nc_qd8_f32_qc8w(
xnn_operator_t fully_connected_op,
const int8_t* input,
float* output,
const struct xnn_quantization_params* quantization_params);
enum xnn_status xnn_reshape_fully_connected_nc_qd8_f32_qc8w(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_create_fully_connected_nc_qs8(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
int8_t input_zero_point,
float input_scale,
float kernel_scale,
const int8_t* kernel,
const int32_t* bias,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_reshape_fully_connected_nc_qs8(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_fully_connected_nc_qs8(
xnn_operator_t fully_connected_op,
const int8_t* input,
int8_t* output);
enum xnn_status xnn_create_fully_connected_nc_qs8_qc8w(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
int8_t input_zero_point,
float input_scale,
const float* kernel_scale,
const int8_t* kernel,
const int32_t* bias,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_reshape_fully_connected_nc_qs8_qc8w(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_fully_connected_nc_qs8_qc8w(
xnn_operator_t fully_connected_op,
const int8_t* input,
int8_t* output);
enum xnn_status xnn_create_fully_connected_nc_qu8(
size_t input_channels,
size_t output_channels,
size_t input_stride,
size_t output_stride,
uint8_t input_zero_point,
float input_scale,
uint8_t kernel_zero_point,
float kernel_scale,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* fully_connected_op_out);
enum xnn_status xnn_reshape_fully_connected_nc_qu8(
xnn_operator_t fully_connected_op,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_fully_connected_nc_qu8(
xnn_operator_t fully_connected_op,
const uint8_t* input,
uint8_t* output);
enum xnn_status xnn_create_max_pooling2d_nhwc_f16(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
float output_min,
float output_max,
uint32_t flags,
xnn_operator_t* max_pooling_op_out);
enum xnn_status xnn_reshape_max_pooling2d_nhwc_f16(
xnn_operator_t max_pooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_max_pooling2d_nhwc_f16(
xnn_operator_t max_pooling_op,
const void* input,
void* output);
enum xnn_status xnn_create_max_pooling2d_nhwc_f32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
float output_min,
float output_max,
uint32_t flags,
xnn_operator_t* max_pooling_op_out);
enum xnn_status xnn_reshape_max_pooling2d_nhwc_f32(
xnn_operator_t max_pooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_max_pooling2d_nhwc_f32(
xnn_operator_t max_pooling_op,
const float* input,
float* output);
enum xnn_status xnn_create_max_pooling2d_nhwc_s8(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_operator_t* max_pooling_op_out);
enum xnn_status xnn_reshape_max_pooling2d_nhwc_s8(
xnn_operator_t max_pooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_max_pooling2d_nhwc_s8(
xnn_operator_t max_pooling_op,
const int8_t* input,
int8_t* output);
enum xnn_status xnn_create_max_pooling2d_nhwc_u8(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
uint32_t stride_height,
uint32_t stride_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
xnn_operator_t* max_pooling_op_out);
enum xnn_status xnn_reshape_max_pooling2d_nhwc_u8(
xnn_operator_t max_pooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_max_pooling2d_nhwc_u8(
xnn_operator_t max_pooling_op,
const uint8_t* input,
uint8_t* output);
enum xnn_status xnn_create_reduce_nd(
enum xnn_reduce_operator reduce_operator_type,
enum xnn_datatype datatype,
const struct xnn_quantization_params* input_quantization,
const struct xnn_quantization_params* output_quantization,
uint32_t flags,
xnn_operator_t* reduce_op_out);
enum xnn_status xnn_reshape_reduce_nd( //
xnn_operator_t reduce_op, //
size_t num_reduction_axes, //
const int64_t* reduction_axes, //
size_t num_input_dims, //
const size_t* input_shape, //
size_t* workspace_size, //
size_t* workspace_alignment, //
pthreadpool_t threadpool);
enum xnn_status xnn_setup_reduce_nd(
xnn_operator_t reduce_op,
void* workspace,
const void* input,
void* output);
enum xnn_status xnn_create_resize_bilinear2d_nchw_f32(
size_t output_height,
size_t output_width,
uint32_t flags,
xnn_operator_t* resize_op_out);
enum xnn_status xnn_reshape_resize_bilinear2d_nchw_f32(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_resize_bilinear2d_nchw_f32(
xnn_operator_t resize_op,
const float* input,
float* output);
enum xnn_status xnn_create_resize_bilinear2d_nchw_f16(
size_t output_height,
size_t output_width,
uint32_t flags,
xnn_operator_t* resize_op_out);
enum xnn_status xnn_reshape_resize_bilinear2d_nchw_f16(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_resize_bilinear2d_nchw_f16(
xnn_operator_t resize_op,
const void* input,
void* output);
enum xnn_status xnn_create_resize_bilinear2d_nhwc_f16(
size_t output_height,
size_t output_width,
uint32_t flags,
xnn_operator_t* resize_op_out);
enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_f16(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* workspace_size,
size_t* workspace_alignment,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f16(
xnn_operator_t resize_op,
void* workspace,
const void* input,
void* output);
enum xnn_status xnn_create_resize_bilinear2d_nhwc_f32(
size_t output_height,
size_t output_width,
uint32_t flags,
xnn_operator_t* resize_op_out);
enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_f32(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* workspace_size,
size_t* workspace_alignment,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f32(
xnn_operator_t resize_op,
void* workspace,
const float* input,
float* output);
enum xnn_status xnn_create_resize_bilinear2d_nhwc_s8(
size_t output_height,
size_t output_width,
uint32_t flags,
xnn_operator_t* resize_op_out);
enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_s8(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* workspace_size,
size_t* workspace_alignment,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_resize_bilinear2d_nhwc_s8(
xnn_operator_t resize_op,
void* workspace,
const int8_t* input,
int8_t* output);
enum xnn_status xnn_create_resize_bilinear2d_nhwc_u8(
size_t output_height,
size_t output_width,
uint32_t flags,
xnn_operator_t* resize_op_out);
enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_u8(
xnn_operator_t resize_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
size_t* workspace_size,
size_t* workspace_alignment,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_resize_bilinear2d_nhwc_u8(
xnn_operator_t resize_op,
void* workspace,
const uint8_t* input,
uint8_t* output);
enum xnn_status xnn_create_rope_nthc_f16(
uint32_t flags,
xnn_operator_t* rope_op_out);
enum xnn_status xnn_reshape_rope_nthc_f16(
xnn_operator_t rope_op,
size_t batch_size,
size_t tokens,
size_t heads,
size_t channels,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_rope_nthc_f16(
xnn_operator_t rope_op,
const void* input,
const void* weights,
void* output);
enum xnn_status xnn_create_rope_nthc_f32(
uint32_t flags,
xnn_operator_t* rope_op_out);
enum xnn_status xnn_reshape_rope_nthc_f32(
xnn_operator_t rope_op,
size_t batch_size,
size_t tokens,
size_t heads,
size_t channels,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_rope_nthc_f32(
xnn_operator_t rope_op,
const float* input,
const float* weights,
float* output);
// N: batch size
// H: number of heads
// T: tokens (sequence length)
// C: channels (head dimension)
enum xnn_status xnn_create_scaled_dot_product_attention_nhtc_f16(
enum xnn_attention_logits_cap_type cap_type,
const void* cap_params,
uint32_t flags,
xnn_operator_t* attention_op_out);
enum xnn_status xnn_reshape_scaled_dot_product_attention_nhtc_f16(
xnn_operator_t attention_op,
size_t batch_size,
size_t query_heads,
// Number of tokens in query.
size_t query_tokens,
size_t key_value_heads,
// Number of tokens in key/value. For self-attention, this is the same as query_tokens.
size_t key_value_tokens,
size_t query_key_channels,
size_t value_channels,
size_t* workspace_size,
size_t* workspace_alignment,
pthreadpool_t threadpool);
// Query is of dimension [batch_size, query_heads, query_tokens, channels].
// Key and value are of dimension [batch_size, key_value_heads, key_value_tokens, channels].
// Scale is of dimension [channels].
// Mask is of dimension [query_tokens, key_value_tokens].
enum xnn_status xnn_setup_scaled_dot_product_attention_nhtc_f16(
xnn_operator_t attention_op,
void* workspace,
const void* query,
const void* key,
const void* value,
const void* scale,
const void* mask,
void* output);
// N: batch size
// H: number of heads
// T: tokens (sequence length)
// C: channels (head dimension)
enum xnn_status xnn_create_scaled_dot_product_attention_nhtc_f32(
enum xnn_attention_logits_cap_type cap_type,
const void* cap_params,
uint32_t flags,
xnn_operator_t* attention_op_out);
enum xnn_status xnn_reshape_scaled_dot_product_attention_nhtc_f32(
xnn_operator_t attention_op,
size_t batch_size,
size_t query_heads,
// Number of tokens in query.
size_t query_tokens,
size_t key_value_heads,
// Number of tokens in key/value. For self-attention, this is the same as query_tokens.
size_t key_value_tokens,
size_t query_key_channels,
size_t value_channels,
size_t* workspace_size,
size_t* workspace_alignment,
pthreadpool_t threadpool);
// Query is of dimension [batch_size, query_heads, query_tokens, query_key_channels].
// Key and value are of dimension [batch_size, key_value_heads, key_value_tokens, query_key_channels].
// Scale is of dimension [query_key_channels].
// Mask is of dimension [query_tokens, key_value_tokens].
// Output is of dimension [batch_size, query_heads, query_tokens, value_channels].
enum xnn_status xnn_setup_scaled_dot_product_attention_nhtc_f32(
xnn_operator_t attention_op,
void* workspace,
const float* query,
const float* key,
const float* value,
const float* scale,
const float* mask,
float* output);
enum xnn_status xnn_create_slice_nd_x16(
uint32_t flags,
xnn_operator_t* slice_op_out);
enum xnn_status xnn_reshape_slice_nd_x16(
xnn_operator_t slice_op,
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_slice_nd_x16(
xnn_operator_t slice_op,
const void* input,
void* output);
enum xnn_status xnn_create_slice_nd_x32(
uint32_t flags,
xnn_operator_t* slice_op_out);
enum xnn_status xnn_reshape_slice_nd_x32(
xnn_operator_t slice_op,
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_slice_nd_x32(
xnn_operator_t slice_op,
const void* input,
void* output);
enum xnn_status xnn_run_slice_nd_x32(
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
const void* input,
void* output,
uint32_t flags,
pthreadpool_t threadpool);
enum xnn_status xnn_create_softmax_nc_f16(
uint32_t flags,
xnn_operator_t* softmax_op_out);
enum xnn_status xnn_reshape_softmax_nc_f16(
xnn_operator_t softmax_op,
size_t channels,
size_t input_stride,
size_t output_stride,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_softmax_nc_f16(
xnn_operator_t softmax_op,
const void* input,
void* output);
enum xnn_status xnn_create_softmax_nc_f32(
uint32_t flags,
xnn_operator_t* softmax_op_out);
enum xnn_status xnn_reshape_softmax_nc_f32(
xnn_operator_t softmax_op,
size_t channels,
size_t input_stride,
size_t output_stride,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_softmax_nc_f32(
xnn_operator_t softmax_op,
const float* input,
float* output);
enum xnn_status xnn_create_softmax_nc_qu8(
float input_scale,
uint8_t output_zero_point,
float output_scale,
uint32_t flags,
xnn_operator_t* softmax_op_out);
enum xnn_status xnn_reshape_softmax_nc_qu8(
xnn_operator_t softmax_op,
size_t channels,
size_t input_stride,
size_t output_stride,
size_t batch_size,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_softmax_nc_qu8(
xnn_operator_t softmax_op,
const uint8_t* input,
uint8_t* output);
enum xnn_status xnn_create_space_to_depth_nhwc_x16(
uint32_t block_size,
uint32_t flags,
xnn_operator_t* space_to_depth_op_out);
enum xnn_status xnn_reshape_space_to_depth_nhwc_x16(
xnn_operator_t space_to_depth_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t input_channels,
size_t* output_height_out,
size_t* output_width_out,
size_t* output_channels_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_space_to_depth_nhwc_x16(
xnn_operator_t space_to_depth_op,
const void* input,
void* output);
enum xnn_status xnn_create_space_to_depth_nhwc_x32(
uint32_t block_size,
uint32_t flags,
xnn_operator_t* space_to_depth_op_out);
enum xnn_status xnn_reshape_space_to_depth_nhwc_x32(
xnn_operator_t space_to_depth_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t input_channels,
size_t* output_height_out,
size_t* output_width_out,
size_t* output_channels_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_space_to_depth_nhwc_x32(
xnn_operator_t space_to_depth_op,
const void* input,
void* output);
enum xnn_status xnn_create_transpose_nd_x8(
uint32_t flags,
xnn_operator_t* transpose_op_out);
enum xnn_status xnn_reshape_transpose_nd_x8(
xnn_operator_t transpose_op,
size_t num_dims,
const size_t* input_shape,
const size_t* output_perm,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_transpose_nd_x8(
xnn_operator_t transpose_op,
const void* input,
void* output);
enum xnn_status xnn_run_transpose_nd_x8(
const void* input,
void* output,
size_t num_dims,
const size_t* input_shape,
const size_t* output_perm,
uint32_t flags,
pthreadpool_t threadpool);
enum xnn_status xnn_create_transpose_nd_x16(
uint32_t flags,
xnn_operator_t* transpose_op_out);
enum xnn_status xnn_reshape_transpose_nd_x16(
xnn_operator_t transpose_op,
size_t num_dims,
const size_t* input_shape,
const size_t* output_perm,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_transpose_nd_x16(
xnn_operator_t transpose_op,
const void* input,
void* output);
enum xnn_status xnn_run_transpose_nd_x16(
const void* input,
void* output,
size_t num_dims,
const size_t* input_shape,
const size_t* output_perm,
uint32_t flags,
pthreadpool_t threadpool);
enum xnn_status xnn_create_transpose_nd_x32(
uint32_t flags,
xnn_operator_t* transpose_op_out);
enum xnn_status xnn_reshape_transpose_nd_x32(
xnn_operator_t transpose_op,
size_t num_dims,
const size_t* input_shape,
const size_t* output_perm,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_transpose_nd_x32(
xnn_operator_t transpose_op,
const void* input,
void* output);
enum xnn_status xnn_run_transpose_nd_x32(
const void* input,
void* output,
size_t num_dims,
const size_t* input_shape,
const size_t* output_perm,
uint32_t flags,
pthreadpool_t threadpool);
enum xnn_status xnn_create_transpose_nd_x64(
uint32_t flags,
xnn_operator_t* transpose_op_out);
enum xnn_status xnn_reshape_transpose_nd_x64(
xnn_operator_t transpose_op,
size_t num_dims,
const size_t* input_shape,
const size_t* output_perm,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_transpose_nd_x64(
xnn_operator_t transpose_op,
const void* input,
void* output);
enum xnn_status xnn_run_transpose_nd_x64(
const void* input,
void* output,
size_t num_dims,
const size_t* input_shape,
const size_t* output_perm,
uint32_t flags,
pthreadpool_t threadpool);
enum xnn_status xnn_create_unpooling2d_nhwc_x32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t pooling_height,
uint32_t pooling_width,
size_t channels,
size_t input_pixel_stride,
size_t output_pixel_stride,
uint32_t flags,
xnn_operator_t* unpooling_op_out);
enum xnn_status xnn_reshape_unpooling2d_nhwc_x32(
xnn_operator_t unpooling_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_unpooling2d_nhwc_x32(
xnn_operator_t unpooling_op,
const void* input,
const uint32_t* index,
void* output);
enum xnn_status xnn_create_slice_nd_x8(
uint32_t flags,
xnn_operator_t* slice_op_out);
enum xnn_status xnn_reshape_slice_nd_x8(
xnn_operator_t slice_op,
size_t num_dims,
const size_t* input_shape,
const size_t* offsets,
const size_t* sizes,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_slice_nd_x8(
xnn_operator_t slice_op,
const void* input,
void* output);
enum xnn_status xnn_create_space_to_depth_nhwc_x8(
uint32_t block_size,
uint32_t flags,
xnn_operator_t* space_to_depth_op_out);
enum xnn_status xnn_reshape_space_to_depth_nhwc_x8(
xnn_operator_t space_to_depth_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t input_channels,
size_t* output_height_out,
size_t* output_width_out,
size_t* output_channels_out,
pthreadpool_t threadpool);
enum xnn_status xnn_setup_space_to_depth_nhwc_x8(
xnn_operator_t space_to_depth_op,
const void* input,
void* output);
#ifdef __cplusplus
} // extern "C"
#endif
```
|
=============================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 8.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import warnings
from collections.abc import Iterator
from contextlib import contextmanager
from typing import Any
import torch._C
# These are imported so users can access them from the `torch.jit` module
from torch._jit_internal import (
_Await,
_drop,
_IgnoreContextManager,
_isinstance,
_overload,
_overload_method,
export,
Final,
Future,
ignore,
is_scripting,
unused,
)
from torch.jit._async import fork, wait
from torch.jit._await import _awaitable, _awaitable_nowait, _awaitable_wait
from torch.jit._decomposition_utils import _register_decomposition
from torch.jit._freeze import freeze, optimize_for_inference, run_frozen_optimizations
from torch.jit._fuser import (
fuser,
last_executed_optimized_graph,
optimized_execution,
set_fusion_strategy,
)
from torch.jit._ir_utils import _InsertPoint
from torch.jit._script import (
_ScriptProfile,
_unwrap_optional,
Attribute,
CompilationUnit,
interface,
RecursiveScriptClass,
RecursiveScriptModule,
script,
script_method,
ScriptFunction,
ScriptModule,
ScriptWarning,
)
from torch.jit._serialization import (
jit_module_from_flatbuffer,
load,
save,
save_jit_module_to_flatbuffer,
)
from torch.jit._trace import (
_flatten,
_get_trace_graph,
_script_if_tracing,
_unique_state_dict,
is_tracing,
ONNXTracedModule,
TopLevelTracedModule,
trace,
trace_module,
TracedModule,
TracerWarning,
TracingCheckError,
)
from torch.utils import set_module
__all__ = [
"Attribute",
"CompilationUnit",
"Error",
"Future",
"ScriptFunction",
"ScriptModule",
"annotate",
"enable_onednn_fusion",
"export",
"export_opnames",
"fork",
"freeze",
"interface",
"ignore",
"isinstance",
"load",
"onednn_fusion_enabled",
"optimize_for_inference",
"save",
"script",
"script_if_tracing",
"set_fusion_strategy",
"strict_fusion",
"trace",
"trace_module",
"unused",
"wait",
]
# For backwards compatibility
_fork = fork
_wait = wait
_set_fusion_strategy = set_fusion_strategy
def export_opnames(m):
r"""
Generate new bytecode for a Script module.
Returns what the op list would be for a Script Module based on the current code base.
If you have a LiteScriptModule and want to get the currently present
list of ops, call `_export_operator_list` instead.
"""
return torch._C._export_opnames(m._c)
# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")
# This is not perfect but works in common cases
Error.__name__ = "Error"
Error.__qualname__ = "Error"
# for use in python if using annotate
def annotate(the_type, the_value):
"""Use to give type of `the_value` in TorchScript compiler.
This method is a pass-through function that returns `the_value`, used to hint TorchScript
compiler the type of `the_value`. It is a no-op when running outside of TorchScript.
Though TorchScript can infer correct type for most Python expressions, there are some cases where
type inference can be wrong, including:
- Empty containers like `[]` and `{}`, which TorchScript assumes to be container of `Tensor`
- Optional types like `Optional[T]` but assigned a valid value of type `T`, TorchScript would assume
it is type `T` rather than `Optional[T]`
Note that `annotate()` does not help in `__init__` method of `torch.nn.Module` subclasses because it
is executed in eager mode. To annotate types of `torch.nn.Module` attributes,
use :meth:`~torch.jit.Attribute` instead.
Example:
.. testcode::
import torch
from typing import Dict
@torch.jit.script
def fn():
# Telling TorchScript that this empty dictionary is a (str -> int) dictionary
# instead of default dictionary type of (str -> Tensor).
d = torch.jit.annotate(Dict[str, int], {})
# Without `torch.jit.annotate` above, following statement would fail because of
# type mismatch.
d["name"] = 20
.. testcleanup::
del fn
Args:
the_type: Python type that should be passed to TorchScript compiler as type hint for `the_value`
the_value: Value or expression to hint type for.
Returns:
`the_value` is passed back as return value.
"""
return the_value
def script_if_tracing(fn):
"""
Compiles ``fn`` when it is first called during tracing.
``torch.jit.script`` has a non-negligible startup time when it is first called due to
lazy initialization of many compiler builtins. Therefore you should not use
it in library code. However, you may want to have parts of your library work
in tracing even if they use control flow. In these cases, you should use
``@torch.jit.script_if_tracing`` to substitute for
``torch.jit.script``.
Args:
fn: A function to compile.
Returns:
If called during tracing, a :class:`ScriptFunction` created by `torch.jit.script` is returned.
Otherwise, the original function `fn` is returned.
"""
return _script_if_tracing(fn)
# for torch.jit.isinstance
def isinstance(obj, target_type):
"""
Provide container type refinement in TorchScript.
It can refine parameterized containers of the List, Dict, Tuple, and Optional types. E.g. ``List[str]``,
``Dict[str, List[torch.Tensor]]``, ``Optional[Tuple[int,str,int]]``. It can also
refine basic types such as bools and ints that are available in TorchScript.
Args:
obj: object to refine the type of
target_type: type to try to refine obj to
Returns:
``bool``: True if obj was successfully refined to the type of target_type,
False otherwise with no new type refinement
Example (using ``torch.jit.isinstance`` for type refinement):
.. testcode::
import torch
from typing import Any, Dict, List
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, input: Any): # note the Any type
if torch.jit.isinstance(input, List[torch.Tensor]):
for t in input:
y = t.clamp(0, 0.5)
elif torch.jit.isinstance(input, Dict[str, str]):
for val in input.values():
print(val)
m = torch.jit.script(MyModule())
x = [torch.rand(3,3), torch.rand(4,3)]
m(x)
y = {"key1":"val1","key2":"val2"}
m(y)
"""
return _isinstance(obj, target_type)
class strict_fusion:
"""
Give errors if not all nodes have been fused in inference, or symbolically differentiated in training.
Example:
Forcing fusion of additions.
.. code-block:: python
@torch.jit.script
def foo(x):
with torch.jit.strict_fusion():
return x + x + x
"""
def __init__(self) -> None:
if not torch._jit_internal.is_scripting():
warnings.warn("Only works in script mode")
def __enter__(self):
pass
def __exit__(self, type: Any, value: Any, tb: Any) -> None:
pass
# Context manager for globally hiding source ranges when printing graphs.
# Note that these functions are exposed to Python as static members of the
# Graph class, so mypy checks need to be skipped.
@contextmanager
def _hide_source_ranges() -> Iterator[None]:
old_enable_source_ranges = torch._C.Graph.global_print_source_ranges # type: ignore[attr-defined]
try:
torch._C.Graph.set_global_print_source_ranges(False) # type: ignore[attr-defined]
yield
finally:
torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges) # type: ignore[attr-defined]
def enable_onednn_fusion(enabled: bool):
"""Enable or disables onednn JIT fusion based on the parameter `enabled`."""
torch._C._jit_set_llga_enabled(enabled)
def onednn_fusion_enabled():
"""Return whether onednn JIT fusion is enabled."""
return torch._C._jit_llga_enabled()
del Any
if not torch._C._jit_init():
raise RuntimeError("JIT initialization failed")
```
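The `script_if_tracing` docstring in the listing above explains when compilation happens but gives no call site. Below is a minimal sketch of the intended usage (`linear_chain` and `entry` are illustrative names, not from the source): the decorated helper runs as plain Python normally and is compiled with `torch.jit.script` only the first time it is called inside a trace.

```py
# Minimal sketch of @torch.jit.script_if_tracing (illustrative helper names).
import torch


@torch.jit.script_if_tracing
def linear_chain(x: torch.Tensor, n: int) -> torch.Tensor:
    # Outside of tracing this is ordinary Python; during tracing it is
    # scripted on first call, so library code avoids paying the
    # torch.jit.script startup cost unless tracing actually needs it.
    for _ in range(n):
        x = x + 1.0
    return x


def entry(x: torch.Tensor) -> torch.Tensor:
    return linear_chain(x, 3)


traced = torch.jit.trace(entry, (torch.zeros(2),))
print(traced(torch.zeros(2)))  # tensor([3., 3.])
```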
|
===========================================================================================================
SOURCE CODE FILE: _async.py
LINES: 1
SIZE: 3.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_async.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Async API.
This module contains the API for parallelism in TorchScript, notably:
* torch.jit.fork
* torch.jit.wait
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import torch
from torch._jit_internal import Future
from torch.jit._builtins import _register_builtin
from torch.utils import set_module
set_module(Future, "torch.jit")
def fork(func, *args, **kwargs):
r"""
Create an asynchronous task executing `func` and a reference to the value of the result of this execution.
`fork` will return immediately, so the return value of `func` may not have been computed yet. To force completion
of the task and access the return value invoke `torch.jit.wait` on the Future. `fork` invoked
with a `func` which returns `T` is typed as `torch.jit.Future[T]`. `fork` calls can be arbitrarily
nested, and may be invoked with positional and keyword arguments.
Asynchronous execution will only occur when run in TorchScript. If run in pure Python,
`fork` will not execute in parallel. `fork` will also not execute in parallel when invoked
while tracing; however, the `fork` and `wait` calls will be captured in the exported IR Graph.
.. warning::
`fork` tasks will execute non-deterministically. We recommend only spawning
parallel fork tasks for pure functions that do not modify their inputs,
module attributes, or global state.
Args:
func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
that will be invoked. If executed in TorchScript, it will execute asynchronously,
otherwise it will not. Traced invocations of fork will be captured in the IR.
``*args``, ``**kwargs``: arguments to invoke `func` with.
Returns:
`torch.jit.Future[T]`: a reference to the execution of `func`. The value `T`
can only be accessed by forcing completion of `func` through `torch.jit.wait`.
Example (fork a free function):
.. code-block:: python
import torch
from torch import Tensor
def foo(a: Tensor, b: int) -> Tensor:
return a + b
def bar(a):
fut: torch.jit.Future[Tensor] = torch.jit.fork(foo, a, b=2)
return torch.jit.wait(fut)
script_bar = torch.jit.script(bar)
input = torch.tensor(2)
# only the scripted version executes asynchronously
assert script_bar(input) == bar(input)
# trace is not run asynchronously, but fork is captured in IR
graph = torch.jit.trace(bar, (input,)).graph
assert "fork" in str(graph)
Example (fork a module method):
.. code-block:: python
import torch
from torch import Tensor
class AddMod(torch.nn.Module):
def forward(self, a: Tensor, b: int):
return a + b
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mod = AddMod()
def forward(self, input):
fut = torch.jit.fork(self.mod, input, b=2)
return torch.jit.wait(fut)
input = torch.tensor(2)
mod = Mod()
assert mod(input) == torch.jit.script(mod).forward(input)
"""
return torch._C.fork(func, *args, **kwargs)
def wait(future):
r"""
Force completion of a `torch.jit.Future[T]` asynchronous task, returning the result of the task.
See :func:`~fork` for docs and examples.
Args:
future (torch.jit.Future[T]): an asynchronous task reference, created through `torch.jit.fork`
Returns:
`T`: the return value of the completed task
"""
return torch._C.wait(future)
_register_builtin(wait, "aten::wait")
```
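The `fork` docstring above already shows single-task examples; a pattern it mentions but does not demonstrate is launching several tasks and then waiting on all of them. The sketch below is illustrative (`foo` and `run_batch` are assumed names, not from the source); parallel execution only happens under `torch.jit.script`, while in eager mode the forks run sequentially with the same results.

```py
# Sketch: fork several tasks, then wait on all of them (illustrative names).
# Parallelism applies only when the caller is compiled with torch.jit.script.
from typing import List

import torch
from torch import Tensor


def foo(x: Tensor) -> Tensor:
    return torch.relu(x @ x)


@torch.jit.script
def run_batch(xs: List[Tensor]) -> List[Tensor]:
    futs: List[torch.jit.Future[Tensor]] = []
    for x in xs:
        futs.append(torch.jit.fork(foo, x))
    return [torch.jit.wait(f) for f in futs]


outs = run_batch([torch.randn(8, 8) for _ in range(4)])
print(len(outs), outs[0].shape)  # 4 torch.Size([8, 8])
```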
|
===========================================================================================================
SOURCE CODE FILE: _await.py
LINES: 1
SIZE: 0.86 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_await.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
from torch._jit_internal import _Await
from torch.jit._builtins import _register_builtin
from torch.utils import set_module
set_module(_Await, "torch.jit")
def _awaitable(func, *args, **kwargs):
r"""Create Await object that will call specified functioni with specified args, when it is requested for the result."""
return torch._C._awaitable(func, *args, **kwargs)
def _awaitable_wait(aw):
r"""Request await the result of execution, if Await is not completed yet, the func will be called immediately."""
return torch._C._awaitable_wait(aw)
def _awaitable_nowait(o):
r"""Create completed Await with specified result."""
return torch._C._awaitable_nowait(o)
_register_builtin(_awaitable_wait, "prim::awaitable_wait")
_register_builtin(_awaitable_nowait, "prim::awaitable_nowait")
```
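None of the three helpers above ships with a usage example; the sketch below illustrates the lazy-evaluation behaviour their docstrings describe, run in eager mode (`heavy` is an illustrative name, and these are private `torch.jit` APIs, so the surface may change between releases).

```py
# Sketch of the private Await helpers in eager mode (illustrative function name).
import torch


def heavy(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    return a * b + 1.0


# Nothing runs yet: the call is recorded in an Await object.
aw = torch.jit._awaitable(heavy, torch.ones(3), torch.full((3,), 2.0))

# The function is invoked (at the latest) when the result is requested.
out = torch.jit._awaitable_wait(aw)
print(out)  # tensor([3., 3., 3.])

# An already-completed Await that simply wraps a plain value.
ready = torch.jit._awaitable_nowait(out)
print(torch.jit._awaitable_wait(ready))
```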
|
==============================================================================================================
SOURCE CODE FILE: _builtins.py
LINES: 1
SIZE: 6.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_builtins.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import cmath
import math
import warnings
from collections import OrderedDict
from typing import Optional
import torch
import torch.backends.cudnn as cudnn
from torch.nn.modules.utils import (
_list_with_default,
_pair,
_quadruple,
_single,
_triple,
)
_builtin_table: Optional[dict[int, str]] = None
_modules_containing_builtins = (torch, torch._C._nn, torch._C._fft, torch._C._linalg, torch._C._nested, torch._C._sparse, torch._C._special) # type: ignore[attr-defined] # noqa: B950
_builtin_ops = [
# Pairs of (function, op_name)
(_pair, "aten::_pair"),
(_quadruple, "aten::_quadruple"),
(_single, "aten::_single"),
(_triple, "aten::_triple"),
(_list_with_default, "aten::list_with_default"),
(OrderedDict, "aten::dict"),
(dict, "aten::dict"),
(cudnn.is_acceptable, "aten::cudnn_is_acceptable"),
(math.ceil, "aten::ceil"),
(math.copysign, "aten::copysign"),
(math.erf, "aten::erf"),
(math.erfc, "aten::erfc"),
(math.exp, "aten::exp"),
(math.expm1, "aten::expm1"),
(math.fabs, "aten::fabs"),
(math.floor, "aten::floor"),
(math.gamma, "aten::gamma"),
(math.lgamma, "aten::lgamma"),
(math.log, "aten::log"),
(math.log10, "aten::log10"),
(math.log1p, "aten::log1p"),
(math.pow, "aten::pow"),
(math.sqrt, "aten::sqrt"),
(math.isnan, "aten::isnan"),
(math.asinh, "aten::asinh"),
(math.atanh, "aten::atanh"),
(math.cosh, "aten::cosh"),
(math.sinh, "aten::sinh"),
(math.tanh, "aten::tanh"),
(math.acos, "aten::acos"),
(math.asin, "aten::asin"),
(math.atan, "aten::atan"),
(math.atan2, "aten::atan2"),
(math.cos, "aten::cos"),
(math.sin, "aten::sin"),
(math.tan, "aten::tan"),
(math.asinh, "aten::asinh"),
(math.atanh, "aten::atanh"),
(math.acosh, "aten::acosh"),
(math.fmod, "aten::fmod"),
(math.modf, "aten::modf"),
(math.factorial, "aten::factorial"),
(math.frexp, "aten::frexp"),
(math.isinf, "aten::isinf"),
(math.degrees, "aten::degrees"),
(math.radians, "aten::radians"),
(cmath.isnan, "aten::isnan"),
(cmath.isfinite, "aten::isfinite"),
(cmath.isinf, "aten::isinf"),
(cmath.phase, "aten::angle"),
(cmath.rect, "aten::polar"),
(cmath.log, "aten::log"),
(cmath.log10, "aten::log10"),
(cmath.sqrt, "aten::sqrt"),
(cmath.exp, "aten::exp"),
(cmath.sin, "aten::sin"),
(cmath.tan, "aten::tan"),
(cmath.cos, "aten::cos"),
(cmath.asin, "aten::asin"),
(cmath.acos, "aten::acos"),
(cmath.atan, "aten::atan"),
(cmath.sinh, "aten::sinh"),
(cmath.cosh, "aten::cosh"),
(cmath.tanh, "aten::tanh"),
(cmath.asinh, "aten::asinh"),
(cmath.acosh, "aten::acosh"),
(cmath.atanh, "aten::atanh"),
(math.ldexp, "aten::ldexp"),
(torch._assert, "aten::_assert"),
(torch.autograd.grad, "aten::grad"),
(torch.autograd.backward, "aten::backward"),
(torch._C._infer_size, "aten::_infer_size"),
(torch.nn.functional._no_grad_embedding_renorm_, "aten::_no_grad_embedding_renorm_"), # type: ignore[attr-defined]
(torch.nn.functional.assert_int_or_pair, "aten::_assert_int_or_pair"),
(torch.nn.init._no_grad_fill_, "aten::_no_grad_fill_"),
(torch.nn.init._no_grad_normal_, "aten::_no_grad_normal_"),
(torch.nn.init._no_grad_uniform_, "aten::_no_grad_uniform_"),
(torch.nn.init._no_grad_zero_, "aten::_no_grad_zero_"),
(torch._C._get_tracing_state, "aten::_get_tracing_state"),
(torch._C._get_cpu_capability, "aten::_get_cpu_capability"),
(warnings.warn, "aten::warn"),
(torch._VF.stft, "aten::stft"), # type: ignore[attr-defined]
(torch._VF.istft, "aten::istft"), # type: ignore[attr-defined]
(torch._VF.cdist, "aten::cdist"), # type: ignore[attr-defined]
(torch._VF.norm, "aten::norm"), # type: ignore[attr-defined]
(torch._VF.unique_dim, "aten::unique_dim"),
(torch._VF.unique_consecutive, "aten::unique_consecutive"), # type: ignore[attr-defined]
(torch._VF.nuclear_norm, "aten::nuclear_norm"),
(torch._VF.frobenius_norm, "aten::frobenius_norm"),
(torch._VF.tensordot, "aten::tensordot"), # type: ignore[attr-defined]
]
# ops in torch.functional are bound to torch
# in these cases, we want to resolve such functions to their Python implementations
# instead of looking up a builtin "aten::" schema
def _gen_torch_functional_registered_ops():
# eventually ops should encompass all of torch/functional.py (torch.functional.__all__),
# but we are currently only able to compile some of the functions. Additionally,
# some functions directly map to their aten:: implementations.
# TODO: add support for more ops
ops = [
"stft",
"istft",
"lu",
"cdist",
"norm",
"unique",
"unique_consecutive",
"tensordot",
]
return {getattr(torch.functional, name) for name in ops}
_functional_registered_ops = _gen_torch_functional_registered_ops()
def _is_special_functional_bound_op(fn):
return fn in _functional_registered_ops
# lazily built to ensure the correct initialization order
def _get_builtin_table():
global _builtin_table
if _builtin_table is not None:
return _builtin_table
_builtin_table = {}
def register_all(mod):
for name in dir(mod):
v = getattr(mod, name)
if (
callable(v)
and not _is_special_functional_bound_op(v)
and v is not torch.no_grad
and v is not torch.autocast
):
# Fixup inconsistency in segment_reduce
if name == "_segment_reduce":
name = name[1:]
_builtin_ops.append((v, "aten::" + name))
for mod in _modules_containing_builtins:
register_all(mod)
_builtin_ops.append((math.gcd, "aten::gcd"))
_builtin_ops.append((math.isfinite, "aten::isfinite"))
_builtin_ops.append((math.remainder, "aten::mathremainder")) # type: ignore[attr-defined]
import torch.distributed.autograd as dist_autograd
if dist_autograd.is_available():
_builtin_ops.append((dist_autograd.get_gradients, "aten::get_gradients"))
_builtin_ops.append((dist_autograd.backward, "aten::dist_backward"))
# populate the _builtin_table from _builtin_ops
for builtin, aten_op in _builtin_ops:
_builtin_table[id(builtin)] = aten_op
return _builtin_table
def _register_builtin(fn, op):
_get_builtin_table()[id(fn)] = op
def _find_builtin(fn):
return _get_builtin_table().get(id(fn))
```
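To make the table mechanics above concrete, here is a small sketch of how the lookup behaves; the expected strings are inferred from the registration code in this file rather than quoted from documentation, and these helpers are private, so treat the exact results as assumptions.

```py
# Sketch: querying the builtin table assembled by _get_builtin_table()
# (private API; expected values inferred from the registration code above).
import math

import torch
from torch.jit._builtins import _find_builtin

print(_find_builtin(torch.add))       # expected "aten::add"  (torch is a registered module)
print(_find_builtin(math.sqrt))       # expected "aten::sqrt" (explicit _builtin_ops entry)
print(_find_builtin(torch.jit.wait))  # expected "aten::wait" (added via _register_builtin)
print(_find_builtin(lambda x: x))     # None: not a registered builtin
```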
|