==================================================================================================== SOURCE CODE FILE: StorageSharing.h LINES: 1 SIZE: 0.14 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\StorageSharing.h ENCODING: utf-8
```h
#ifndef THP_STORAGE_SHARING_INC
#define THP_STORAGE_SHARING_INC

#include <Python.h>

PyMethodDef* THPStorage_getSharingMethods();

#endif
```
==================================================================================================== SOURCE CODE FILE: Stream.h LINES: 1 SIZE: 0.69 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Stream.h ENCODING: utf-8
```h
#ifndef THP_STREAM_INC
#define THP_STREAM_INC

#include <c10/core/Stream.h>
#include <c10/macros/Export.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>

struct THPStream {
  PyObject_HEAD
  int64_t stream_id;
  int64_t device_type;
  int64_t device_index;
  // Used to switch stream context management, initialized lazily.
  PyObject* context;
};

extern TORCH_API PyTypeObject* THPStreamClass;

void THPStream_init(PyObject* module);

inline bool THPStream_Check(PyObject* obj) {
  return THPStreamClass && PyObject_IsInstance(obj, (PyObject*)THPStreamClass);
}

TORCH_PYTHON_API PyObject* THPStream_Wrap(const c10::Stream& stream);

#endif // THP_STREAM_INC
```
==================================================================================================== SOURCE CODE FILE: THConcat.h LINES: 1 SIZE: 0.69 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\THConcat.h ENCODING: utf-8
```h
#pragma once

#define TH_CONCAT_STRING_2(x, y) TH_CONCAT_STRING_2_EXPAND(x, y)
#define TH_CONCAT_STRING_2_EXPAND(x, y) #x #y

#define TH_CONCAT_STRING_3(x, y, z) TH_CONCAT_STRING_3_EXPAND(x, y, z)
#define TH_CONCAT_STRING_3_EXPAND(x, y, z) #x #y #z

#define TH_CONCAT_STRING_4(x, y, z, w) TH_CONCAT_STRING_4_EXPAND(x, y, z, w)
#define TH_CONCAT_STRING_4_EXPAND(x, y, z, w) #x #y #z #w

#define TH_CONCAT_2(x, y) TH_CONCAT_2_EXPAND(x, y)
#define TH_CONCAT_2_EXPAND(x, y) x##y

#define TH_CONCAT_3(x, y, z) TH_CONCAT_3_EXPAND(x, y, z)
#define TH_CONCAT_3_EXPAND(x, y, z) x##y##z

#define TH_CONCAT_4_EXPAND(x, y, z, w) x##y##z##w
#define TH_CONCAT_4(x, y, z, w) TH_CONCAT_4_EXPAND(x, y, z, w)
```
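The two-level `_EXPAND` indirection forces macro arguments to be fully expanded before they are pasted or stringized. A minimal illustration (the `GREETING` and `hello_world` macros are made up for this sketch):

```cpp
#include <torch/csrc/THConcat.h>
#include <cstdio>

#define GREETING hello
#define hello_world "pasted!"

int main() {
  // GREETING expands to `hello` first, then pasting yields the identifier
  // hello_world, i.e. the string literal defined above.
  std::printf("%s\n", TH_CONCAT_2(GREETING, _world)); // pasted!
  // The stringizing variant produces "hello" "_world" == "hello_world".
  std::printf("%s\n", TH_CONCAT_STRING_2(GREETING, _world)); // hello_world
}
```

Without the indirection, `#x` and `x##y` would operate on the literal token `GREETING` instead of its expansion.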
==================================================================================================== SOURCE CODE FILE: THP.h LINES: 1 SIZE: 0.90 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\THP.h ENCODING: utf-8
```h
#ifndef THP_H
#define THP_H

#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>

// Back-compatibility macros, thanks to http://cx-oracle.sourceforge.net/
// Define PyInt_* macros for Python 3.x. NB: We must include Python.h first,
// otherwise we'll incorrectly conclude PyInt_Check isn't defined!
#ifndef PyInt_Check
#define PyInt_Check PyLong_Check
#define PyInt_FromLong PyLong_FromLong
#define PyInt_AsLong PyLong_AsLong
#define PyInt_Type PyLong_Type
#endif

#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Generator.h>
#include <torch/csrc/Module.h>
#include <torch/csrc/Size.h>
#include <torch/csrc/Storage.h>
#include <torch/csrc/Types.h>
#include <torch/csrc/utils.h> // This requires defined Storage and Tensor types
#include <torch/csrc/utils/byte_order.h>

#include <torch/csrc/serialization.h>

#include <torch/csrc/autograd/python_autograd.h>

#endif
```
==================================================================================================== SOURCE CODE FILE: TypeInfo.h LINES: 1 SIZE: 0.58 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\TypeInfo.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>

#include <ATen/ATen.h>

struct THPDTypeInfo {
  PyObject_HEAD
  at::ScalarType type;
};

struct THPFInfo : THPDTypeInfo {};

struct THPIInfo : THPDTypeInfo {};

TORCH_PYTHON_API extern PyTypeObject THPFInfoType;
TORCH_PYTHON_API extern PyTypeObject THPIInfoType;

inline bool THPFInfo_Check(PyObject* obj) {
  return Py_TYPE(obj) == &THPFInfoType;
}

inline bool THPIInfo_Check(PyObject* obj) {
  return Py_TYPE(obj) == &THPIInfoType;
}

void THPDTypeInfo_init(PyObject* module);
```
==================================================================================================== SOURCE CODE FILE: Types.h LINES: 1 SIZE: 0.17 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\Types.h ENCODING: utf-8
```h
#ifndef THP_TYPES_INC
#define THP_TYPES_INC

#include <cstddef>

#ifndef INT64_MAX
#include <cstdint>
#endif

template <typename T>
struct THPTypeInfo {};

#endif
```
==================================================================================================== SOURCE CODE FILE: all.h LINES: 1 SIZE: 0.57 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\all.h ENCODING: utf-8
```h
#pragma once

#if !defined(_MSC_VER) && __cplusplus < 201703L
#error C++17 or later compatible compiler is required to use PyTorch.
#endif

#include <torch/autograd.h>
#include <torch/cuda.h>
#include <torch/data.h>
#include <torch/enum.h>
#include <torch/fft.h>
#include <torch/jit.h>
#include <torch/mps.h>
#include <torch/nested.h>
#include <torch/nn.h>
#include <torch/optim.h>
#include <torch/serialize.h>
#include <torch/sparse.h>
#include <torch/special.h>
#include <torch/types.h>
#include <torch/utils.h>
#include <torch/version.h>
#include <torch/xpu.h>
```
==================================================================================================== SOURCE CODE FILE: arg.h LINES: 1 SIZE: 1.42 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\arg.h ENCODING: utf-8
```h
#pragma once

#include <utility>

#define TORCH_ARG(T, name)                                                 \
 public:                                                                   \
  inline auto name(const T& new_##name) -> decltype(*this) { /* NOLINT */ \
    this->name##_ = new_##name;                                            \
    return *this;                                                          \
  }                                                                        \
  inline auto name(T&& new_##name) -> decltype(*this) { /* NOLINT */      \
    this->name##_ = std::move(new_##name);                                 \
    return *this;                                                          \
  }                                                                        \
  inline const T& name() const noexcept { /* NOLINT */                     \
    return this->name##_;                                                  \
  }                                                                        \
  inline T& name() noexcept { /* NOLINT */                                 \
    return this->name##_;                                                  \
  }                                                                        \
                                                                           \
 private:                                                                  \
  T name##_ /* NOLINT */
```
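`TORCH_ARG` generates the chainable setter/getter pattern used by the options structs later in this tree. A minimal sketch with a hypothetical `MyOptions`:

```cpp
#include <torch/arg.h>
#include <cstddef>

struct MyOptions { // hypothetical options struct
  TORCH_ARG(size_t, batch_size) = 32; // the macro's trailing `T name##_` takes the initializer
  TORCH_ARG(bool, shuffle) = true;
};

int main() {
  // Each setter returns *this, so configuration chains fluently.
  auto opts = MyOptions().batch_size(64).shuffle(false);
  return opts.shuffle() ? 1 : 0; // getters share the argument's name
}
```

Note that each invocation ends in a `private:` section, so the next `TORCH_ARG` in the same struct re-opens `public:` itself.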
==================================================================================================== SOURCE CODE FILE: autograd.h LINES: 1 SIZE: 0.17 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\autograd.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/autograd/autograd.h>
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
#include <torch/csrc/autograd/custom_function.h>
```
==================================================================================================== SOURCE CODE FILE: cuda.h LINES: 1 SIZE: 0.74 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\cuda.h ENCODING: utf-8
```h
#pragma once

#include <c10/core/Device.h>
#include <c10/macros/Export.h>

#include <cstdint>

namespace torch::cuda {

/// Returns the number of CUDA devices available.
c10::DeviceIndex TORCH_API device_count();

/// Returns true if at least one CUDA device is available.
bool TORCH_API is_available();

/// Returns true if CUDA is available, and CuDNN is available.
bool TORCH_API cudnn_is_available();

/// Sets the seed for the current GPU.
void TORCH_API manual_seed(uint64_t seed);

/// Sets the seed for all available GPUs.
void TORCH_API manual_seed_all(uint64_t seed);

/// Waits for all kernels in all streams on a CUDA device to complete.
void TORCH_API synchronize(int64_t device_index = -1);

} // namespace torch::cuda
```
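A minimal sketch of guarding GPU work behind these runtime checks; nothing here is specific to a particular model:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  if (torch::cuda::is_available()) {
    std::cout << torch::cuda::device_count() << " CUDA device(s)\n";
    torch::cuda::manual_seed_all(42); // seed every visible GPU
    auto t = torch::randn({1024, 1024}, torch::kCUDA);
    auto u = t.matmul(t); // queued asynchronously on the current stream
    torch::cuda::synchronize(); // block until all queued kernels complete
    std::cout << u.sum().item<float>() << '\n';
  }
}
```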
==================================================================================================== SOURCE CODE FILE: data.h LINES: 1 SIZE: 0.30 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data.h ENCODING: utf-8
```h
#pragma once

#include <torch/data/dataloader.h>
#include <torch/data/datasets.h>
#include <torch/data/samplers.h>
#include <torch/data/transforms.h>

// Some "exports".
namespace torch::data {
using datasets::BatchDataset; // NOLINT
using datasets::Dataset; // NOLINT
} // namespace torch::data
```
==================================================================================================== SOURCE CODE FILE: dataloader.h LINES: 1 SIZE: 1.91 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\dataloader.h ENCODING: utf-8
```h
#pragma once

#include <torch/data/dataloader/stateful.h>
#include <torch/data/dataloader/stateless.h>

#include <torch/csrc/utils/variadic.h>

#include <c10/util/Exception.h>

#include <cstddef>
#include <memory>
#include <type_traits>
#include <utility>

namespace torch::data {

/// Creates a `DataLoader` instance for a stateless `dataset`, a `sampler` and
/// some `options`.
template <typename Dataset, typename Sampler>
std::enable_if_t<
    !Dataset::is_stateful,
    std::unique_ptr<StatelessDataLoader<Dataset, Sampler>>>
make_data_loader(Dataset dataset, Sampler sampler, DataLoaderOptions options) {
  return std::make_unique<StatelessDataLoader<Dataset, Sampler>>(
      std::move(dataset), std::move(sampler), options);
}

/// Creates a `DataLoader` instance for a stateless `dataset` and some
/// `options`. A sampler (by default a `RandomSampler`) will be constructed
/// from the size of the dataset.
template <typename Sampler = samplers::RandomSampler, typename Dataset>
std::enable_if_t<
    !Dataset::is_stateful && std::is_constructible_v<Sampler, size_t>,
    std::unique_ptr<StatelessDataLoader<Dataset, Sampler>>>
make_data_loader(
    Dataset dataset,
    DataLoaderOptions options = DataLoaderOptions()) {
  const std::optional<size_t> size = dataset.size();
  TORCH_CHECK(
      size.has_value(),
      "Expected the dataset to be sized in "
      "order to construct the Sampler");
  return make_data_loader(std::move(dataset), Sampler(*size), options);
}

/// Creates a `DataLoader` for a stateful `dataset` and some `options`.
template <typename Dataset, typename = std::enable_if_t<Dataset::is_stateful>>
std::unique_ptr<StatefulDataLoader<Dataset>> make_data_loader(
    Dataset dataset,
    DataLoaderOptions options = DataLoaderOptions()) {
  return std::make_unique<StatefulDataLoader<Dataset>>(
      std::move(dataset), options);
}
} // namespace torch::data
```
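A minimal sketch of the stateless overloads in action, assuming the unzipped MNIST files live in `./mnist`; `Stack<>` collates each `std::vector<Example<>>` into a single batched `Example<>`:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  auto dataset = torch::data::datasets::MNIST("./mnist")
                     .map(torch::data::transforms::Stack<>());
  // The sampler defaults to RandomSampler; 64 converts implicitly to
  // DataLoaderOptions through its batch_size constructor.
  auto loader =
      torch::data::make_data_loader(std::move(dataset), /*batch_size=*/64);
  for (auto& batch : *loader) {
    std::cout << batch.data.sizes() << '\n'; // [64, 1, 28, 28]
  }
}
```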
==================================================================================================== SOURCE CODE FILE: base.h LINES: 1 SIZE: 9.25 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\dataloader\base.h ENCODING: utf-8
```h
#pragma once

#include <torch/data/dataloader_options.h>
#include <torch/data/detail/data_shuttle.h>
#include <torch/data/detail/sequencers.h>
#include <torch/data/iterator.h>
#include <torch/data/samplers/random.h>
#include <torch/data/worker_exception.h>
#include <torch/types.h>

#include <torch/csrc/utils/variadic.h>

#include <c10/util/Exception.h>
#include <c10/util/irange.h>

#include <cstddef>
#include <exception>
#include <memory>
#include <thread>
#include <utility>
#include <vector>

namespace torch::data {
template <typename Dataset, typename Batch, typename BatchRequest>
class DataLoaderBase {
 public:
  using BatchType = Batch;
  using BatchRequestType = BatchRequest;

  /// Constructs a new DataLoader from a `dataset` to sample from, `options`
  /// to configure the DataLoader with, and a `sampler` that specifies the
  /// sampling strategy.
  DataLoaderBase(
      DataLoaderOptions options,
      std::unique_ptr<Dataset> main_thread_dataset = nullptr)
      : options_(options),
        main_thread_dataset_(std::move(main_thread_dataset)),
        sequencer_(new_sequencer()) {}

  DataLoaderBase(const DataLoaderBase&) = delete;
  DataLoaderBase(DataLoaderBase&&) = delete;
  DataLoaderBase& operator=(const DataLoaderBase&) = delete;
  DataLoaderBase& operator=(DataLoaderBase&&) = delete;

  // NOLINTNEXTLINE(bugprone-exception-escape)
  virtual ~DataLoaderBase() {
    join();
  }

  /// Returns an iterator into the DataLoader. The lifetime of the iterator is
  /// bound to the DataLoader. In C++ standards language, the category of the
  /// iterator is `OutputIterator`. See
  /// https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
  /// means. In short: you may increment the iterator and dereference it, but
  /// cannot go back, or step forward more than one position at a time. When
  /// the DataLoader is exhausted, it will compare equal with the special
  /// "sentinel" iterator returned by `DataLoader::end()`. Most of the time,
  /// you should only use range-for loops to loop over the DataLoader, but
  /// standard algorithms like `std::copy(dataloader.begin(), dataloader.end(),
  /// output_iterator)` are supported too.
  Iterator<Batch> begin() {
    TORCH_CHECK(
        shuttle_.in_flight_jobs() == 0,
        "Attempted to get a new DataLoader iterator "
        "while another iterator is not yet exhausted");
    reset();
    return Iterator<Batch>(std::make_unique<detail::ValidIterator<Batch>>(
        [this] { return this->next(); }));
  }

  /// Returns a special "sentinel" iterator that compares equal with a
  /// non-sentinel iterator once the DataLoader is exhausted.
  Iterator<Batch> end() {
    return Iterator<Batch>(
        std::make_unique<detail::SentinelIterator<Batch>>());
  }

  /// Joins the DataLoader's worker threads and drains internal queues.
  /// This function may only be invoked from the main thread (in which the
  /// DataLoader lives).
  void join() {
    if (joined_) {
      return;
    }
    shuttle_.drain();
    // Send one 'quit' message per worker. Since a worker dies (exits its
    // thread) after receiving this message, each `QuitWorker()` message will
    // be read by exactly one worker.
    for ([[maybe_unused]] const auto w : c10::irange(options_.workers)) {
      push_job(QuitWorker());
    }
    for (auto& worker : workers_) {
      worker.join();
    }
    joined_ = true;
  }

  /// Returns the options with which the DataLoader was configured.
  const FullDataLoaderOptions& options() const noexcept {
    return options_;
  }

 protected:
  /// Simple mix-in to give something a sequence number.
  struct Sequenced {
    Sequenced() = default;
    Sequenced(size_t sqn) : sequence_number(sqn) {}
    size_t sequence_number;
  };

  struct QuitWorker {};

  /// A `Job` is either a `BatchRequest` (new indices to fetch data at) or a
  /// `QuitWorker` object, to indicate the worker should shut down.
  struct Job : Sequenced {
    Job() = default;
    Job(QuitWorker q, size_t sqn) : Sequenced(sqn), quit(q) {}
    Job(BatchRequest&& i, size_t sqn)
        : Sequenced(sqn), batch_request(std::move(i)) {}
    std::optional<QuitWorker> quit;
    std::optional<BatchRequest> batch_request;
  };

  /// The finished result of a job.
  struct Result : Sequenced {
    Result() = default;
    Result(std::optional<Batch>&& b, size_t sqn)
        : Sequenced(sqn), batch(std::move(b)) {}
    Result(std::exception_ptr exception, size_t sqn)
        : Sequenced(sqn), exception(std::move(exception)) {}
    std::optional<Batch> batch;
    std::exception_ptr exception;
  };

  /// Subclass hook for getting the next batch request. The stateless case
  /// will ask the sampler for a new batch request (e.g. a vector of indices),
  /// while the stateful one will simply return the batch size.
  virtual std::optional<BatchRequestType> get_batch_request() = 0;

  /// Resets the internal state of the DataLoader, optionally pre-fetching
  /// new jobs.
  virtual void reset() {
    shuttle_.drain();
    sequence_number_ = 0;
    sequencer_ = new_sequencer();
    prefetch();
  }

  /// Schedules `requested_jobs` many new batches to be fetched. The actual
  /// number of jobs scheduled may be less if the DataLoader exhausts.
  void prefetch(size_t requested_jobs) {
    for ([[maybe_unused]] const auto r : c10::irange(requested_jobs)) {
      if (auto batch_request = get_batch_request()) {
        this->push_job(std::move(*batch_request));
      } else {
        break;
      }
    }
  }

  /// Schedules the maximum number of jobs (based on the `max_jobs` option).
  void prefetch() {
    prefetch(options_.max_jobs);
  }

  /// Returns the next batch of data, or an empty `optional` if the DataLoader
  /// is exhausted. This operation will block until a batch is available if
  /// one is still expected.
  std::optional<BatchType> next() {
    if (options_.workers > 0) {
      while (std::optional<Result> result = this->pop_result()) {
        if (result->exception) {
          throw WorkerException(result->exception);
        } else if (result->batch) {
          prefetch(1);
          return std::move(result->batch);
        }
      }
    } else if (auto batch_request = get_batch_request()) {
      return this->main_thread_dataset_->get_batch(std::move(*batch_request));
    }
    return std::nullopt;
  }

  /// The function that worker threads run.
  void worker_thread(Dataset& dataset) {
    while (true) {
      auto job = shuttle_.pop_job();
      if (job.quit) {
        break;
      }
      try {
        auto batch = dataset.get_batch(std::move(*job.batch_request));
        shuttle_.push_result({std::move(batch), job.sequence_number});
      } catch (...) {
        shuttle_.push_result({std::current_exception(), job.sequence_number});
      }
    }
  }

  /// Convenience method that calls `shuttle_.push_job()` with the next
  /// sequence number.
  template <typename T>
  void push_job(T value) {
    shuttle_.push_job({std::move(value), sequence_number_++});
  }

  /// Convenience method that gets the next result from the sequencer.
  std::optional<Result> pop_result() {
    return sequencer_->next(
        [this] { return this->shuttle_.pop_result(this->options_.timeout); });
  }

  /// Convenience method that creates a new sequencer based on the
  /// `enforce_ordering` option.
  std::unique_ptr<detail::sequencers::Sequencer<Result>> new_sequencer() {
    if (options_.enforce_ordering) {
      return std::make_unique<detail::sequencers::OrderedSequencer<Result>>(
          options_.max_jobs);
    }
    return std::make_unique<detail::sequencers::NoSequencer<Result>>();
  }

  /// The options the DataLoader was configured with.
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const FullDataLoaderOptions options_;

  /// The dataset for the main thread, only has a value if the number of
  /// worker threads was configured as zero, meaning the main thread has to do
  /// all the work (synchronously). NOTE: Really want this to be on the heap
  /// when empty, therefore `unique_ptr` and not `optional`.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::unique_ptr<Dataset> main_thread_dataset_;

  /// The sequence number for the *next* batch to be retrieved from the
  /// dataset.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  size_t sequence_number_ = 0;

  /// The worker threads, running the `worker_thread()` method.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::vector<std::thread> workers_;

  /// The `DataShuttle` which takes care of the life cycle of a job.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  detail::DataShuttle<Job, Result> shuttle_;

  /// The `Sequencer`, which handles optional ordering of batches.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::unique_ptr<detail::sequencers::Sequencer<Result>> sequencer_;

  /// True if the DataLoader has joined its worker threads.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool joined_ = false;
};
} // namespace torch::data
```
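A sketch of the standard-algorithm support the `begin()` comment mentions, again assuming MNIST data in `./mnist`; since the iterator is single-pass, `std::copy` simply drives it from `begin()` to the sentinel `end()`:

```cpp
#include <torch/torch.h>
#include <algorithm>
#include <iterator>
#include <vector>

int main() {
  auto loader = torch::data::make_data_loader(
      torch::data::datasets::MNIST("./mnist"), /*batch_size=*/64);
  std::vector<std::vector<torch::data::Example<>>> batches;
  // Each dereference yields one batch (here std::vector<Example<>>, since no
  // Stack transform was applied); copy collects every batch of the epoch.
  std::copy(loader->begin(), loader->end(), std::back_inserter(batches));
}
```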
==================================================================================================== SOURCE CODE FILE: dataloader_options.h LINES: 1 SIZE: 2.21 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\dataloader_options.h ENCODING: utf-8
```h
#pragma once

#include <torch/arg.h>
#include <torch/types.h>

#include <chrono>
#include <cstddef>

namespace torch::data {

/// Options to configure a `DataLoader`.
struct DataLoaderOptions {
  DataLoaderOptions() = default;
  /* implicit */ DataLoaderOptions(size_t batch_size)
      : batch_size_(batch_size) {}

  /// The size of each batch to fetch.
  TORCH_ARG(size_t, batch_size) = 1;

  /// The number of worker threads to launch. If zero, the main thread will
  /// synchronously perform the data loading.
  TORCH_ARG(size_t, workers) = 0;

  /// The maximum number of jobs to enqueue for fetching by worker threads.
  /// Defaults to two times the number of worker threads.
  TORCH_ARG(std::optional<size_t>, max_jobs);

  /// An optional limit on the time to wait for the next batch.
  TORCH_ARG(std::optional<std::chrono::milliseconds>, timeout);

  /// Whether to enforce ordering of batches when multiple are loaded
  /// asynchronously by worker threads. Set to `false` for better performance
  /// if you do not care about determinism.
  TORCH_ARG(bool, enforce_ordering) = true;

  /// Whether to omit the last batch if it contains less than `batch_size`
  /// examples.
  TORCH_ARG(bool, drop_last) = false;
};

/// Like `DataLoaderOptions`, but without any unconfigured state.
/// `DataLoaderOptions` has some options that depend on other options
/// (`max_jobs` => `2 * workers`). In the spirit of properly using the C++
/// type system, `DataLoaderOptions` allows only setting values. To access
/// values, you must create a `FullDataLoaderOptions` from a
/// `DataLoaderOptions` instance, which will do any necessary coalescing.
struct FullDataLoaderOptions {
  explicit FullDataLoaderOptions(DataLoaderOptions options)
      : batch_size(options.batch_size()),
        workers(options.workers()),
        max_jobs(options.max_jobs().value_or(2 * workers)),
        timeout(options.timeout()),
        enforce_ordering(options.enforce_ordering()),
        drop_last(options.drop_last()) {}

  size_t batch_size;
  size_t workers;
  size_t max_jobs;
  std::optional<std::chrono::milliseconds> timeout;
  bool enforce_ordering;
  bool drop_last;
};
} // namespace torch::data
```
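A small sketch of the coalescing described above: `DataLoaderOptions` only records settings, and constructing a `FullDataLoaderOptions` resolves the dependent default `max_jobs => 2 * workers`:

```cpp
#include <torch/torch.h>
#include <cassert>

int main() {
  auto opts = torch::data::DataLoaderOptions().batch_size(32).workers(4);
  torch::data::FullDataLoaderOptions full(opts);
  // max_jobs was left unset, so it coalesces to 2 * workers.
  assert(full.max_jobs == 8);
}
```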
==================================================================================================== SOURCE CODE FILE: stateful.h LINES: 1 SIZE: 2.34 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\dataloader\stateful.h ENCODING: utf-8
```h
#pragma once

#include <c10/util/irange.h>
#include <torch/data/dataloader/base.h>

#include <cstddef>
#include <thread>
#include <utility>

namespace torch::data {

/// A dataloader for stateful datasets.
///
/// A dataloader for stateful datasets differs from one for stateless datasets
/// in that the dataset is shared among worker threads, and that this dataset
/// is itself responsible for producing batches rather than depending on a
/// sampler. The statefulness here actually refers to the dataset. The
/// StatefulDataLoader simply alters the data loading algorithm to accommodate
/// the stateful, shared nature of the dataset. Note that the dataset must be
/// thread safe if more than one worker thread is used.
///
/// A stateful dataloader is created by calling `make_data_loader` with a
/// stateful dataset.
template <typename Dataset>
class StatefulDataLoader : public DataLoaderBase<
                               Dataset,
                               typename Dataset::BatchType::value_type,
                               typename Dataset::BatchRequestType> {
 public:
  using super = DataLoaderBase<
      Dataset,
      typename Dataset::BatchType::value_type,
      typename Dataset::BatchRequestType>;
  using typename super::BatchRequestType;

  /// Constructs the `StatefulDataLoader` from a `dataset` and some `options`.
  StatefulDataLoader(Dataset dataset, DataLoaderOptions options)
      : super(options, std::make_unique<Dataset>(std::move(dataset))) {
    for ([[maybe_unused]] const auto _ : c10::irange(this->options_.workers)) {
      // As opposed to the stateless case, here all worker threads access the
      // same underlying dataset.
      this->workers_.emplace_back(
          [this] { this->worker_thread(*this->main_thread_dataset_); });
    }
  }

 private:
  /// Resets the internal state of the dataloader and the dataset.
  void reset() override {
    this->main_thread_dataset_->reset();
    // Call the base class method last because it calls `prefetch()`
    super::reset();
  }

  /// For stateful datasets, the batch request is always the batch size. The
  /// dataset is responsible for determining what goes into the batch next.
  std::optional<BatchRequestType> get_batch_request() override {
    return this->options_.batch_size;
  }
};
} // namespace torch::data
```
==================================================================================================== SOURCE CODE FILE: stateless.h LINES: 1 SIZE: 2.76 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\dataloader\stateless.h ENCODING: utf-8
```h
#pragma once

#include <torch/data/dataloader/base.h>
#include <torch/data/worker_exception.h>

#include <c10/util/Exception.h>
#include <c10/util/irange.h>

#include <cstddef>
#include <thread>
#include <utility>

namespace torch::data {

/// A dataloader for stateless datasets.
///
/// This dataloader follows the traditional PyTorch dataloader design, whereby
/// a (possibly) stateful sampler produces *batch requests* for a stateless
/// dataset, which acts as a simple batch request to batch mapping. The batch
/// request will often be an array of indices, and if the dataset is a simple
/// image dataset, the dataset would produce the images at those indices.
template <typename Dataset, typename Sampler>
class StatelessDataLoader : public DataLoaderBase<
                                Dataset,
                                typename Dataset::BatchType,
                                typename Sampler::BatchRequestType> {
 public:
  using super = DataLoaderBase<
      Dataset,
      typename Dataset::BatchType,
      typename Sampler::BatchRequestType>;
  using typename super::BatchRequestType;

  /// Constructs the `StatelessDataLoader` from a `dataset`, a `sampler` and
  /// some `options`.
  StatelessDataLoader(Dataset dataset, Sampler sampler, DataLoaderOptions options)
      : super(options), sampler_(std::move(sampler)) {
    for (const auto w : c10::irange(this->options_.workers)) {
      // Here we copy the dataset into the worker thread closure. Each worker
      // has its own copy of the dataset. This means the dataset must be
      // trivially copiable, or else we don't expect more than one worker to
      // be in use.
      (void)w; // Suppress unused variable warning
      this->workers_.emplace_back(
          [this, dataset]() mutable { this->worker_thread(dataset); });
    }
    if (this->options_.workers == 0) {
      this->main_thread_dataset_ =
          std::make_unique<Dataset>(std::move(dataset));
    }
  }

 private:
  /// Resets the internal state of the dataloader and the sampler.
  void reset() override {
    sampler_.reset();
    // Call the base class method last because it calls `prefetch()`
    super::reset();
  }

  /// Queries the sampler for the next batch request (possibly progressing its
  /// internal state).
  std::optional<BatchRequestType> get_batch_request() override {
    auto indices = sampler_.next(this->options_.batch_size);
    if (!indices ||
        (indices->size() < this->options_.batch_size &&
         this->options_.drop_last)) {
      return std::nullopt;
    }
    AT_ASSERT(indices->size() > 0);
    return indices;
  }

  /// The `Sampler` used to produce batch requests.
  Sampler sampler_;
};
} // namespace torch::data
```
==================================================================================================== SOURCE CODE FILE: datasets.h LINES: 1 SIZE: 0.29 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\datasets.h ENCODING: utf-8
```h
#pragma once

#include <torch/data/datasets/base.h>
#include <torch/data/datasets/chunk.h>
#include <torch/data/datasets/map.h>
#include <torch/data/datasets/mnist.h>
#include <torch/data/datasets/shared.h>
#include <torch/data/datasets/stateful.h>
#include <torch/data/datasets/tensor.h>
```
==================================================================================================== SOURCE CODE FILE: base.h LINES: 1 SIZE: 3.20 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\datasets\base.h ENCODING: utf-8
```h
#pragma once

#include <torch/data/example.h>
#include <torch/types.h>

#include <c10/util/ArrayRef.h>

#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>
#include <vector>

namespace torch::data::datasets {
template <typename S, typename T>
class MapDataset;
template <typename D, typename T>
MapDataset<D, T> map(D, T); // NOLINT
} // namespace torch::data::datasets

namespace torch::data::datasets {
namespace detail {
template <typename T>
struct is_optional : std::false_type {};
template <typename T>
struct is_optional<std::optional<T>> : std::true_type {};
} // namespace detail

/// A dataset that can yield data only in batches.
template <
    typename Self,
    typename Batch = std::vector<Example<>>,
    typename BatchRequest = ArrayRef<size_t>>
class BatchDataset {
 public:
  using SelfType = Self;
  using BatchType = Batch;
  using BatchRequestType = BatchRequest;
  constexpr static bool is_stateful = detail::is_optional<BatchType>::value;

  virtual ~BatchDataset() = default;

  /// Returns a batch of data given an index.
  virtual Batch get_batch(BatchRequest request) = 0;

  /// Returns the size of the dataset, or an empty std::optional if it is
  /// unsized.
  virtual std::optional<size_t> size() const = 0;

  /// Creates a `MapDataset` that applies the given `transform` to this
  /// dataset.
  template <typename TransformType>
  MapDataset<Self, TransformType> map(TransformType transform) & {
    return datasets::map(static_cast<Self&>(*this), std::move(transform));
  }

  /// Creates a `MapDataset` that applies the given `transform` to this
  /// dataset.
  template <typename TransformType>
  MapDataset<Self, TransformType> map(TransformType transform) && {
    return datasets::map(
        std::move(static_cast<Self&>(*this)), std::move(transform));
  }
};

/// A dataset that can yield data in batches, or as individual examples.
///
/// A `Dataset` is a `BatchDataset`, because it supports random access and
/// therefore batched access is implemented (by default) by calling the random
/// access indexing function for each index in the requested batch of indices.
/// This can be customized.
template <typename Self, typename SingleExample = Example<>>
class Dataset : public BatchDataset<Self, std::vector<SingleExample>> {
 public:
  using ExampleType = SingleExample;

  /// Returns the example at the given index.
  virtual ExampleType get(size_t index) = 0;

  /// Returns a batch of data.
  /// The default implementation calls `get()` for every requested index
  /// in the batch.
  std::vector<ExampleType> get_batch(ArrayRef<size_t> indices) override {
    std::vector<ExampleType> batch;
    batch.reserve(indices.size());
    for (const auto i : indices) {
      batch.push_back(get(i));
    }
    return batch;
  }
};

/// A `StreamDataset` represents a dataset that is a potentially infinite
/// stream. It takes as batch index only a number, which is the batch size,
/// and yields that many elements from the stream.
template <typename Self, typename Batch = std::vector<Example<>>>
using StreamDataset = BatchDataset<Self, Batch, /*BatchRequest=*/size_t>;
} // namespace torch::data::datasets
```
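A minimal sketch of the random-access `Dataset` API: only `get()` and `size()` need overriding, and the inherited `get_batch()` loops over `get()` as described above. `SquaresDataset` is hypothetical:

```cpp
#include <torch/torch.h>

class SquaresDataset
    : public torch::data::datasets::Dataset<SquaresDataset> {
 public:
  torch::data::Example<> get(size_t index) override {
    auto x = torch::tensor(static_cast<int64_t>(index));
    return {x, x * x}; // {data, target}
  }
  std::optional<size_t> size() const override {
    return 100;
  }
};

int main() {
  SquaresDataset dataset;
  // The default get_batch() calls get() for each requested index.
  auto batch = dataset.get_batch({0, 2, 4});
  return static_cast<int>(batch.size()); // 3
}
```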
==================================================================================================== SOURCE CODE FILE: chunk.h LINES: 2 SIZE: 19.23 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\datasets\chunk.h ENCODING: utf-8
```h
#pragma once

#include <c10/util/irange.h>
#include <torch/arg.h>
#include <torch/data/datasets/stateful.h>
#include <torch/data/samplers.h>
#include <queue>
#include <thread>
#include <utility>

#include <torch/serialize.h>

namespace torch::data::datasets {

/// Interface for a chunk reader, which performs data chunking and reading of
/// entire chunks.
///
/// A chunk could be an entire file, such as an audio data file or an image,
/// or part of a file in the case of a large text file split based on seek
/// positions.
template <
    typename ExampleType_,
    typename ChunkType_ = std::vector<ExampleType_>>
class ChunkDataReader {
 public:
  virtual ~ChunkDataReader() = default;

  using ChunkType = ChunkType_;
  using ExampleType = ExampleType_;

  /// Read an entire chunk.
  virtual ChunkType read_chunk(size_t chunk_index) = 0;

  /// Returns the number of chunks available in this reader.
  virtual size_t chunk_count() = 0;

  /// This will clear any internal state associated with this reader.
  virtual void reset() = 0;
};

namespace detail {

/// BatchDataBuffer manages a queue of UnwrappedBatchData. After a new chunk
/// is loaded, BatchDataBuffer splits it into small batches and pushes them
/// into the queue. When get_batch is called from the data loader, it pops
/// cached batches and returns them. If the cache is empty, it either waits
/// for more chunks to load or returns null if all chunks are loaded.
template <
    typename UnwrappedBatch,
    typename ExampleSampler = samplers::RandomSampler>
class BatchDataBuffer {
 public:
  using UnwrappedBatchType = UnwrappedBatch;
  using BatchType = std::optional<UnwrappedBatchType>;
  using BatchRequestType = typename ExampleSampler::BatchRequestType;

  BatchDataBuffer(
      size_t batch_size,
      ExampleSampler& example_sampler,
      size_t queue_capacity)
      : batch_size_(batch_size),
        example_sampler_(example_sampler),
        queue_capacity_(queue_capacity) {}

  /// Return batch data from the queue. Called from the ChunkDataset main
  /// thread.
  BatchType get_batch() {
    std::unique_lock<std::mutex> lock(queue_mutex_);
    cv_read_.wait(lock, [this] {
      // wait till there is available data in the queue or all chunks are
      // loaded (i.e. the dataset is exhausted for this epoch)
      return (
          this->total_example_count_in_queue_ >= batch_size_ || this->stop_);
    });
    if (batch_queue_.empty()) {
      AT_ASSERT(stop_);
      // All batches have been retrieved. Return an empty batch.
      return std::nullopt;
    }

    UnwrappedBatchData batch = std::move(batch_queue_.front());
    batch_queue_.pop();
    if (batch.exception) {
      throw WorkerException(batch.exception);
    }

    total_example_count_in_queue_ -= batch.batch_data.size();
    lock.unlock();
    cv_write_.notify_all();

    return batch.batch_data;
  }

  /// Push preloaded chunks to the batch queue. Called from the ChunkDataset
  /// worker threads.
  void add_chunk_data(UnwrappedBatchType data) {
    std::unique_lock<std::mutex> lock(queue_mutex_);
    cv_write_.wait(lock, [this] {
      // stop loading if we have preloaded enough data.
      return this->total_example_count_in_queue_ < this->queue_capacity_ ||
          this->stop_;
    });
    if (stop_) {
      // When stop_ is true, it means no further chunk loading is necessary.
      // Return without any further processing.
      return;
    }

    auto data_size = data.size();
    auto remaining_size = data_size;
    example_sampler_.reset(data_size);

    auto fill_batch = [&](size_t example_count, UnwrappedBatchType& batch) {
      auto batch_example_indices = this->example_sampler_.next(example_count);
      AT_ASSERT(
          batch_example_indices &&
          batch_example_indices.value().size() == example_count);
      BatchRequestType& indices = batch_example_indices.value();
      for (size_t i : indices) {
        TORCH_CHECK(i < data_size, "Index out of range");
        batch.emplace_back(std::move(data[i]));
      }
      remaining_size -= example_count;
    };

    if (!batch_queue_.empty()) {
      // if the queue has existing data, and the last batch doesn't have
      // enough examples to fill a batch_size batch, add more examples to
      // this batch first.
      auto& batch = batch_queue_.back();
      size_t current_count = batch.batch_data.size();
      if (current_count < batch_size_) {
        auto example_count =
            std::min(remaining_size, batch_size_ - current_count);
        fill_batch(example_count, batch.batch_data);
      }
    }

    // If we still have data remaining after filling the last pushed batch,
    // add them to the queue too.
    while (remaining_size > 0) {
      UnwrappedBatchType current_batch;

      // Allocate the batch memory ahead of time.
      current_batch.reserve(batch_size_);

      auto example_count = std::min(remaining_size, batch_size_);
      fill_batch(example_count, current_batch);
      batch_queue_.emplace(std::move(current_batch));
    }

    total_example_count_in_queue_ += data_size;
    lock.unlock();
    cv_read_.notify_all();
  }

  /// Push exceptions thrown during preloading into the batch queue. Called
  /// from the ChunkDataset worker threads.
  void add_chunk_data(std::exception_ptr e_ptr) {
    std::unique_lock<std::mutex> lock(queue_mutex_);
    cv_write_.wait(lock, [this] {
      // stop loading if we have preloaded enough data.
      return (
          this->total_example_count_in_queue_ < this->queue_capacity_ ||
          this->stop_);
    });
    if (stop_) {
      // When stop_ is true, the current thread needs to be torn down and the
      // batch buffer will be discarded, so there is no need to enqueue any
      // new exceptions.
      return;
    }

    batch_queue_.emplace(e_ptr);
    lock.unlock();
    cv_read_.notify_all();
  }

  void stop() {
    {
      // Hold the lock before changing stop_ to prevent a race condition
      // which can cause a deadlock. To be more specific, the condition
      // variable cv_write_ waits on the predicate stop_ in add_chunk_data().
      // The wait happens in two steps: 1) while still holding the lock,
      // check if the predicate is true; 2) if it is true, proceed;
      // otherwise, release the lock and wait until notified. Without holding
      // the lock, cv_write_'s notification can happen in between steps 1)
      // and 2). In that case, as cv_write_ is not in waiting status yet, the
      // notification is lost and cv_write_ will sleep forever. By taking the
      // lock before changing the predicate stop_, it is ensured that
      // updating and evaluating stop_ always happen in a synchronized way.
      std::lock_guard<std::mutex> lock(queue_mutex_);
      stop_ = true;
    }

    // notify all writers, wake them from wait to exit the current method.
    cv_write_.notify_all();
    // notify all readers too.
    cv_read_.notify_all();
  }

  /// The batch size is needed to create batches from the chunk data. Similar
  /// to a regular dataloader, where batches are created with prefetches,
  /// BatchDataBuffer performs the batch creation using the provided batch
  /// size.
  size_t batch_size_ = 0;

  /// count of total examples stored in the queue
  size_t total_example_count_in_queue_ = 0;

  /// struct that contains a raw unwrapped batch unit. An unwrapped batch
  /// unit is the raw data without the 'optional' wrapper. It can be a
  /// collection of images, utterances, etc.
  struct UnwrappedBatchData {
    explicit UnwrappedBatchData(UnwrappedBatchType data)
        : batch_data(std::move(data)) {}

    explicit UnwrappedBatchData(std::exception_ptr e)
        : exception(std::move(e)) {}

    /// batch data to return
    UnwrappedBatchType batch_data;

    /// exception pointer which captures any abnormal exceptions while
    /// creating the batch.
    std::exception_ptr exception;
  };

  /// local cache to store example batches from loaded chunks
  std::queue<UnwrappedBatchData> batch_queue_;

  // sync batch_queue_ updates.
  std::mutex queue_mutex_;

  std::condition_variable cv_read_;
  std::condition_variable cv_write_;

  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  ExampleSampler& example_sampler_;

  // configurable maximum number of elements the queue can hold at one time.
  size_t queue_capacity_;

  // When set to true, it wakes the writer threads from the wait and exits
  // the current function call. This is needed when ChunkDataSet.Reset is
  // called while the previous epoch is not exhausted yet. When ChunkDataset
  // is waiting for its preloader to finish previous work before tearing down
  // the thread, the preloader could still be waiting on the condition
  // variable, causing the program to hang. This boolean is used to break
  // this waiting condition.
  bool stop_ = false;
};
} // namespace detail

/// Options to configure a `ChunkDataset`.
struct ChunkDatasetOptions {
  ChunkDatasetOptions() = delete;
  ChunkDatasetOptions(
      size_t preloader_count,
      size_t batch_size,
      size_t cache_size = 2048,
      size_t cross_chunk_shuffle_count = 1)
      : preloader_count_(preloader_count),
        batch_size_(batch_size),
        cache_size_(cache_size),
        cross_chunk_shuffle_count_(cross_chunk_shuffle_count) {
    TORCH_CHECK(
        preloader_count_ > 0,
        "Preloader count is 0. At least one preloader needs to be specified.");
    TORCH_CHECK(
        batch_size_ > 0,
        "Batch size is 0. A positive batch size needs to be specified.");
    TORCH_CHECK(
        cache_size_ > 0,
        "Cache size is 0. A positive cache size needs to be specified.");
    TORCH_CHECK(
        cache_size_ >= batch_size_,
        "Cache size is less than batch size. Cache needs to be large enough to "
        "hold at least one batch.");
    TORCH_CHECK(
        cross_chunk_shuffle_count_ > 0,
        "cross_chunk_shuffle_count needs to be greater than 0.");
  }

  /// The number of worker threads to preload chunk data.
  TORCH_ARG(size_t, preloader_count);

  /// The size of each batch.
  TORCH_ARG(size_t, batch_size);

  /// The capacity of the queue for batch caching.
  TORCH_ARG(size_t, cache_size) = 2048;

  // The number of chunks to perform cross-chunk shuffling over. Defaults to
  // 1, meaning no cross-chunk shuffling. When it is equal to n (n > 1), n
  // random chunks will be loaded at once and example shuffling will be
  // performed across all those n chunks.
  // Note: Usually the default config (1 chunk shuffle + example shuffle) is
  // good enough to generate randomly distributed data. Use this parameter
  // only if you know cross-shuffling is needed in your case. Also, there is
  // a performance penalty when this value is greater than 1, as we need to
  // do an extra merge between multiple chunks before performing example
  // sampling.
  TORCH_ARG(size_t, cross_chunk_shuffle_count) = 1;
};

/// A stateful dataset that supports hierarchical sampling and prefetching of
/// entire chunks.
///
/// Unlike a regular dataset, a chunk dataset requires two samplers to operate
/// and keeps internal state. The `ChunkSampler` selects which chunk to load
/// next, while the `ExampleSampler` determines the order of Examples that are
/// returned in each `get_batch` call. The hierarchical sampling approach used
/// here is inspired by this paper:
/// http://martin.zinkevich.org/publications/nips2010.pdf
template <
    typename ChunkReader,
    typename ChunkSampler = samplers::RandomSampler,
    typename ExampleSampler = samplers::RandomSampler>
class ChunkDataset final
    : public StatefulDataset<
          ChunkDataset<ChunkReader, ChunkSampler, ExampleSampler>,
          typename ChunkReader::BatchType,
          size_t> {
 public:
  using BatchType = std::optional<typename ChunkReader::BatchType>;
  using UnwrappedBatchType = typename ChunkReader::BatchType;
  using BatchRequestType = size_t;
  using ChunkSamplerType = ChunkSampler;
  using ExampleSamplerType = ExampleSampler;

  ChunkDataset(
      ChunkReader chunk_reader,
      ChunkSampler chunk_sampler,
      ExampleSampler example_sampler,
      ChunkDatasetOptions options,
      std::function<void(UnwrappedBatchType&)> preprocessing_policy =
          std::function<void(UnwrappedBatchType&)>())
      : chunk_reader_(std::move(chunk_reader)),
        chunk_sampler_(std::move(chunk_sampler)),
        example_sampler_(std::move(example_sampler)),
        options_(options),
        preprocessing_policy_(std::move(preprocessing_policy)),
        quit_worker_(false),
        running_preloaders_(0) {}

  ~ChunkDataset() override {
    // stop the batch buffer first.
    if (batch_buffer_) {
      batch_buffer_->stop();
    }
    free_workers();
  }

  /// Default get_batch method of BatchDataset. This method returns Example
  /// batches created from the preloaded chunks. The implementation is
  /// dataset-agnostic and does not need overriding in different chunk
  /// datasets.
  BatchType get_batch(size_t batch_size) override {
    TORCH_CHECK(
        batch_buffer_ != nullptr,
        "Dataset needs to call reset() before calling get_batch().");
    TORCH_CHECK(
        batch_size == options_.batch_size(),
        "The requested batch size does not match with the initialized batch size.\n"
        " The requested batch size is ",
        batch_size,
        ", while the dataset is created with batch size equal to ",
        options_.batch_size());
    return batch_buffer_->get_batch();
  }

  /// Helper method around get_batch, as `batch_size` is not strictly
  /// necessary.
  BatchType get_batch() {
    return get_batch(options_.batch_size());
  }

  /// This will clear any internal state and start the internal prefetching
  /// mechanism for the chunk dataset.
  void reset() override {
    // We need this to support partial data reads via the dataloader
    // iterator.
    if (batch_buffer_) {
      batch_buffer_->stop();
    }
    // free workers from a previous reset if there are any.
    free_workers();
    preload_threads_.clear();

    if (!load_checkpoint_) {
      chunk_reader_.reset();
      chunk_sampler_.reset(chunk_reader_.chunk_count());
      load_checkpoint_ = false;
    }

    // Throw out any existing cached batch in the buffer and re-create a new
    // chunk buffer.
    batch_buffer_ = std::make_unique<
        detail::BatchDataBuffer<UnwrappedBatchType, ExampleSamplerType>>(
        options_.batch_size(), example_sampler_, options_.cache_size());

    // create new workers for this new epoch.
    quit_worker_ = false;

    AT_ASSERT(running_preloaders_ == 0);
    running_preloaders_ = options_.preloader_count();
    for (const auto i : c10::irange(options_.preloader_count())) {
      preload_threads_.emplace_back([this, i]() { this->preloader(i); });
    }
  }

  /// size is not used for the chunk dataset.
  std::optional<size_t> size() const override {
    return std::nullopt;
  }

  // Provides a reference to the chunk sampler. Used mainly in distributed
  // data loading to set the epoch number for the sampler.
  ChunkSamplerType& chunk_sampler() {
    return chunk_sampler_;
  }

  void save(serialize::OutputArchive& archive) const override {
    std::lock_guard<std::mutex> lock(chunk_index_guard_);
    chunk_sampler_.save(archive);
  }

  void load(serialize::InputArchive& archive) override {
    std::lock_guard<std::mutex> lock(chunk_index_guard_);
    chunk_sampler_.load(archive);
    load_checkpoint_ = true;
  }

 private:
  /// running on a worker thread to preload chunk data.
  void preloader(size_t id) {
    while (!quit_worker_.load()) {
      try {
        std::vector<size_t> chunk_idx;
        {
          std::lock_guard<std::mutex> lock(chunk_index_guard_);
          if (auto chunk_sampler_result = chunk_sampler_.next(
                  this->options_.cross_chunk_shuffle_count())) {
            chunk_idx = chunk_sampler_result.value();
          } else {
            break;
          }
        }
        UnwrappedBatchType data = chunk_reader_.read_chunk(chunk_idx[0]);
        for (const auto i : c10::irange(1, chunk_idx.size())) {
          auto chunk_data = chunk_reader_.read_chunk(chunk_idx[i]);
          std::move(
              chunk_data.begin(), chunk_data.end(), std::back_inserter(data));
        }
        if (preprocessing_policy_) {
          preprocessing_policy_(data);
        }
        if (!data.empty()) { // skip empty chunks.
          batch_buffer_->add_chunk_data(std::move(data));
        }
      } catch (...) {
        batch_buffer_->add_chunk_data(std::current_exception());
      }
    }
    AT_ASSERT(running_preloaders_.load() > 0);
    --running_preloaders_;
    if (running_preloaders_.load() == 0) {
      // all preloaders are completed, so we can notify the batch_buffer.
      batch_buffer_->stop();
    }
  }

  /// Block the current thread until the workers finish execution and exit.
  void free_workers() {
    if (!quit_worker_.load()) {
      quit_worker_ = true;
      for (auto& worker_thread : preload_threads_) {
        worker_thread.join();
      }
    }
  }

 private:
  // Templated class that defines what a chunk is and how to read chunk data.
  // When a chunk is returned by chunk_reader_, ChunkDataset splits it into
  // batches and caches them in batch_buffer_.
  ChunkReader chunk_reader_;

  // chunk sampler to shuffle different chunks
  ChunkSamplerType chunk_sampler_;

  // example sampler to shuffle examples in a specific chunk
  ExampleSamplerType example_sampler_;

  // batch data buffer which holds chunk data from the preloading threads.
  std::shared_ptr<
      detail::BatchDataBuffer<UnwrappedBatchType, ExampleSamplerType>>
      batch_buffer_;

  // worker thread pool
  std::vector<std::thread> preload_threads_;

  /// The options the Dataset was configured with.
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const ChunkDatasetOptions options_;

  // function pointer wrapper to apply custom processing over chunk data.
  // This is considered an advanced parameter for developers who want to
  // apply a pre-process to the chunk data before sampling into minibatches.
  // Different from the collate function, this policy is applied at the chunk
  // level, instead of the minibatch level. When a chunk of data is loaded
  // (multiple chunks if cross_chunk_shuffle_count_ is greater than 1), this
  // policy is applied to the full loaded data. It is useful if developers
  // want to perform pre-processing (like bucketing) on the chunk data before
  // the example sampler samples the data. By default it's an empty pointer
  // and no action will be taken.
  std::function<void(UnwrappedBatchType&)> preprocessing_policy_;

  // indicates whether the worker threads can be torn down
  std::atomic<bool> quit_worker_;

  // keeps track of running preloaders to notify the batch buffer. A value of
  // 0 indicates that the chunk loading is completed.
  std::atomic<size_t> running_preloaders_;

  // mutex to synchronize chunk sampler next() calls.
  mutable std::mutex chunk_index_guard_;

  // boolean value to indicate whether we need to load the checkpoint for
  // chunk_sampler_.
  bool load_checkpoint_{false};
};
} // namespace torch::data::datasets
```
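A sketch of wiring a custom reader into `ChunkDataset`, with `DummyChunkReader` made up for illustration. Note the `BatchType` alias: `ChunkDataset` refers to `typename ChunkReader::BatchType`, which concrete readers are expected to provide (an assumption based on the declaration above):

```cpp
#include <torch/torch.h>
#include <numeric>
#include <vector>

class DummyChunkReader : public torch::data::datasets::ChunkDataReader<int> {
 public:
  using BatchType = ChunkType; // std::vector<int>; required by ChunkDataset

  ChunkType read_chunk(size_t chunk_index) override {
    ChunkType chunk(100); // each chunk holds 100 consecutive ints
    std::iota(chunk.begin(), chunk.end(), static_cast<int>(chunk_index) * 100);
    return chunk;
  }
  size_t chunk_count() override { return 10; }
  void reset() override {}
};

int main() {
  namespace data = torch::data;
  // RandomSampler(0) suffices: ChunkDataset resets both samplers itself.
  auto dataset = data::datasets::make_shared_dataset<
      data::datasets::ChunkDataset<DummyChunkReader>>(
      DummyChunkReader(),
      data::samplers::RandomSampler(0),
      data::samplers::RandomSampler(0),
      data::datasets::ChunkDatasetOptions(
          /*preloader_count=*/2, /*batch_size=*/25));
  // Stateful path of make_data_loader: the dataset itself produces batches.
  auto loader = data::make_data_loader(dataset, data::DataLoaderOptions(25));
  for (auto& batch : *loader) {
    (void)batch; // a std::vector<int> of (up to) 25 shuffled examples
  }
}
```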
==================================================================================================== SOURCE CODE FILE: map.h LINES: 1 SIZE: 4.09 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\datasets\map.h ENCODING: utf-8
```h
#pragma once

#include <torch/data/datasets/base.h>
#include <torch/types.h>

#include <c10/util/ArrayRef.h>

#include <cstddef>
#include <type_traits>
#include <utility>

namespace torch::data::datasets {
namespace detail {
template <bool C, typename T>
using optional_if_t = std::conditional_t<C, std::optional<T>, T>;
} // namespace detail

/// A `MapDataset` is a dataset that applies a transform to a source dataset.
template <typename SourceDataset, typename AppliedTransform>
class MapDataset : public BatchDataset<
                       MapDataset<SourceDataset, AppliedTransform>,
                       detail::optional_if_t<
                           SourceDataset::is_stateful,
                           typename AppliedTransform::OutputBatchType>,
                       typename SourceDataset::BatchRequestType> {
 public:
  using DatasetType = SourceDataset;
  using TransformType = AppliedTransform;
  using BatchRequestType = typename SourceDataset::BatchRequestType;
  using OutputBatchType = detail::optional_if_t<
      SourceDataset::is_stateful,
      typename AppliedTransform::OutputBatchType>;

  MapDataset(DatasetType dataset, TransformType transform)
      : dataset_(std::move(dataset)), transform_(std::move(transform)) {}

  /// Gets a batch from the source dataset and applies the transform to it,
  /// returning the result.
  OutputBatchType get_batch(BatchRequestType indices) override {
    return get_batch_impl(std::move(indices));
  }

  /// Returns the size of the source dataset.
  // NOLINTNEXTLINE(bugprone-exception-escape)
  std::optional<size_t> size() const noexcept override {
    return dataset_.size();
  }

  /// Calls `reset()` on the underlying dataset.
  /// NOTE: Stateless datasets do not have a reset() method, so a call to
  /// this method will only compile for stateful datasets (which have a
  /// reset() method).
  void reset() {
    dataset_.reset();
  }

  /// Returns the underlying dataset.
  const SourceDataset& dataset() noexcept {
    return dataset_;
  }

  /// Returns the transform being applied.
  const AppliedTransform& transform() noexcept {
    return transform_;
  }

 private:
  /// The implementation of `get_batch()` for the stateless case, which
  /// simply applies the transform to the output of `get_batch()` from the
  /// dataset.
  template <
      typename D = SourceDataset,
      typename = std::enable_if_t<!D::is_stateful>>
  OutputBatchType get_batch_impl(BatchRequestType indices) {
    return transform_.apply_batch(dataset_.get_batch(std::move(indices)));
  }

  /// The implementation of `get_batch()` for the stateful case. Here, we
  /// follow the semantics of `Optional.map()` in many functional languages,
  /// which applies a transformation to the optional's content when the
  /// optional contains a value, and returns a new optional (of a different
  /// type) if the original optional returned by `get_batch()` was empty.
  template <typename D = SourceDataset>
  std::enable_if_t<D::is_stateful, OutputBatchType> get_batch_impl(
      BatchRequestType indices) {
    if (auto batch = dataset_.get_batch(std::move(indices))) {
      return transform_.apply_batch(std::move(*batch));
    }
    return std::nullopt;
  }

  /// The underlying dataset being transformed.
  SourceDataset dataset_;

  // The transformation that is applied to batches received from the dataset.
  AppliedTransform transform_;
};

/// Creates a `MapDataset` with the given dataset and transform.
template <typename DatasetType, typename TransformType>
MapDataset<DatasetType, TransformType> map(
    DatasetType dataset,
    TransformType transform) {
  static_assert(
      std::is_same_v<
          std::conditional_t<
              DatasetType::is_stateful,
              typename DatasetType::BatchType::value_type,
              typename DatasetType::BatchType>,
          typename TransformType::InputBatchType>,
      "BatchType type of dataset does not match input type of transform");
  return {std::move(dataset), std::move(transform)};
}

} // namespace torch::data::datasets
```
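A minimal sketch of transform chaining via `map()`, assuming MNIST data in `./mnist`; each call wraps the previous dataset in a `MapDataset`, and the `static_assert` above verifies at compile time that batch types line up:

```cpp
#include <torch/torch.h>

int main() {
  auto dataset =
      torch::data::datasets::MNIST("./mnist")
          // per-example transform: rewrites each Example's data tensor
          .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
          // collation: turns std::vector<Example<>> into one stacked Example<>
          .map(torch::data::transforms::Stack<>());
  auto batch = dataset.get_batch({0, 1, 2});
  (void)batch; // batch.data has shape [3, 1, 28, 28] after stacking
}
```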
==================================================================================================== SOURCE CODE FILE: mnist.h LINES: 1 SIZE: 1.24 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\datasets\mnist.h ENCODING: utf-8
```h
#pragma once

#include <torch/data/datasets/base.h>
#include <torch/data/example.h>
#include <torch/types.h>

#include <torch/csrc/Export.h>

#include <cstddef>
#include <string>

namespace torch::data::datasets {
/// The MNIST dataset.
class TORCH_API MNIST : public Dataset<MNIST> {
 public:
  /// The mode in which the dataset is loaded.
  enum class Mode { kTrain, kTest };

  /// Loads the MNIST dataset from the `root` path.
  ///
  /// The supplied `root` path should contain the *content* of the unzipped
  /// MNIST dataset, available from http://yann.lecun.com/exdb/mnist.
  explicit MNIST(const std::string& root, Mode mode = Mode::kTrain);

  /// Returns the `Example` at the given `index`.
  Example<> get(size_t index) override;

  /// Returns the size of the dataset.
  std::optional<size_t> size() const override;

  /// Returns true if this is the training subset of MNIST.
  // NOLINTNEXTLINE(bugprone-exception-escape)
  bool is_train() const noexcept;

  /// Returns all images stacked into a single tensor.
  const Tensor& images() const;

  /// Returns all targets stacked into a single tensor.
  const Tensor& targets() const;

 private:
  Tensor images_, targets_;
};
} // namespace torch::data::datasets
```
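A short sketch of the test-split accessors, assuming the raw MNIST files are in `./mnist` (the test split holds 10,000 examples of shape `[1, 28, 28]`):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::data::datasets::MNIST test_set(
      "./mnist", torch::data::datasets::MNIST::Mode::kTest);
  std::cout << "examples: " << test_set.size().value() << '\n'; // 10000
  std::cout << "is_train: " << test_set.is_train() << '\n'; // 0
  std::cout << "images:   " << test_set.images().sizes() << '\n'; // [10000, 1, 28, 28]
}
```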
==================================================================================================== SOURCE CODE FILE: shared.h LINES: 1 SIZE: 2.61 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\datasets\shared.h ENCODING: utf-8
```h
#pragma once

#include <torch/data/datasets/base.h>

#include <memory>
#include <utility>

namespace torch::data::datasets {

/// A dataset that wraps another dataset in a shared pointer and implements
/// the `BatchDataset` API, delegating all calls to the shared instance. This
/// is useful when you want all worker threads in the dataloader to access the
/// same dataset instance. The dataset must take care of synchronization and
/// thread-safe access itself.
///
/// Use `torch::data::datasets::make_shared_dataset()` to create a new
/// `SharedBatchDataset` like you would a `std::shared_ptr`.
template <typename UnderlyingDataset>
class SharedBatchDataset : public BatchDataset<
                               SharedBatchDataset<UnderlyingDataset>,
                               typename UnderlyingDataset::BatchType,
                               typename UnderlyingDataset::BatchRequestType> {
 public:
  using BatchType = typename UnderlyingDataset::BatchType;
  using BatchRequestType = typename UnderlyingDataset::BatchRequestType;

  /// Constructs a new `SharedBatchDataset` from a `shared_ptr` to the
  /// `UnderlyingDataset`.
  /* implicit */ SharedBatchDataset(
      std::shared_ptr<UnderlyingDataset> shared_dataset)
      : dataset_(std::move(shared_dataset)) {}

  /// Calls `get_batch` on the underlying dataset.
  BatchType get_batch(BatchRequestType request) override {
    return dataset_->get_batch(std::move(request));
  }

  /// Returns the `size` from the underlying dataset.
  std::optional<size_t> size() const override {
    return dataset_->size();
  }

  /// Accesses the underlying dataset.
  UnderlyingDataset& operator*() {
    return *dataset_;
  }

  /// Accesses the underlying dataset.
  const UnderlyingDataset& operator*() const {
    return *dataset_;
  }

  /// Accesses the underlying dataset.
  UnderlyingDataset* operator->() {
    return dataset_.get();
  }

  /// Accesses the underlying dataset.
  const UnderlyingDataset* operator->() const {
    return dataset_.get();
  }

  /// Calls `reset()` on the underlying dataset.
  void reset() {
    dataset_->reset();
  }

 private:
  std::shared_ptr<UnderlyingDataset> dataset_;
};

/// Constructs a new `SharedBatchDataset` by creating a
/// `shared_ptr<UnderlyingDataset>`. All arguments are forwarded to
/// `make_shared<UnderlyingDataset>`.
template <typename UnderlyingDataset, typename... Args>
SharedBatchDataset<UnderlyingDataset> make_shared_dataset(Args&&... args) {
  return std::make_shared<UnderlyingDataset>(std::forward<Args>(args)...);
}
} // namespace torch::data::datasets
```
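A minimal sketch, assuming a hypothetical dataset that is safe to share because it is read-only after construction; every dataloader worker then aliases one instance instead of holding its own copy:

```cpp
#include <torch/torch.h>

struct ConstDataset : torch::data::datasets::Dataset<ConstDataset> {
  torch::data::Example<> get(size_t index) override {
    auto x = torch::tensor(static_cast<int64_t>(index));
    return {x, x};
  }
  std::optional<size_t> size() const override {
    return 8;
  }
};

int main() {
  auto shared = torch::data::datasets::make_shared_dataset<ConstDataset>();
  shared->size();   // operator-> delegates to the wrapped ConstDataset
  (*shared).size(); // as does operator*
  // Copies taken by dataloader workers all point at the same instance.
  auto loader = torch::data::make_data_loader(shared, /*batch_size=*/4);
  for (auto& batch : *loader) {
    (void)batch; // std::vector<Example<>> of size 4
  }
}
```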
=========================================================================================================================================================== SOURCE CODE FILE: stateful.h LINES: 1 SIZE: 2.24 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\datasets\stateful.h ENCODING: utf-8 ```h #pragma once #include <torch/data/datasets/base.h> #include <torch/data/example.h> #include <cstddef> #include <vector> namespace torch::serialize { class OutputArchive; class InputArchive; } // namespace torch::serialize namespace torch::data::datasets { /// A stateful dataset is a dataset that maintains some internal state, which /// will be `reset()` at the beginning of each epoch. Subclasses can override /// the `reset()` method to configure this behavior. Further, the return type of /// a stateful dataset's `get_batch()` method is always an `optional`. When the /// stateful dataset wants to indicate to the dataloader that its epoch has /// ended, it should return an empty optional. The dataloader knows to modify /// its implementation based on whether the dataset is stateless or stateful. /// /// Note that when subclassing from `StatefulDataset<Self, T>`, the return /// type of `get_batch()`, which the subclass must override, will be /// `optional<T>` (i.e. the type specified in the `StatefulDataset` /// specialization is automatically boxed into an `optional` for the dataset's /// `BatchType`). template < typename Self, typename Batch = std::vector<Example<>>, typename BatchRequest = size_t> class StatefulDataset : public BatchDataset<Self, std::optional<Batch>, BatchRequest> { public: /// Resets internal state of the dataset. virtual void reset() = 0; /// Saves the `StatefulDataset`'s state to the `archive`. virtual void save(serialize::OutputArchive& archive) const = 0; /// Deserializes the `StatefulDataset`'s state from the `archive`. virtual void load(serialize::InputArchive& archive) = 0; }; /// Serializes a `StatefulDataset` to `OutputArchive`. template <typename... Args> serialize::OutputArchive& operator<<( serialize::OutputArchive& archive, const StatefulDataset<Args...>& statefulDataset) { statefulDataset.save(archive); return archive; } /// Deserializes a `StatefulDataset` from an `InputArchive`. template <typename... Args> serialize::InputArchive& operator>>( serialize::InputArchive& archive, StatefulDataset<Args...>& statefulDataset) { statefulDataset.load(archive); return archive; } } // namespace torch::data::datasets ```
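A minimal sketch of a subclass, with all names (`CountingDataset`, `kBatchesPerEpoch`) being illustrative rather than part of the library:

```cpp
#include <torch/torch.h>

#include <optional>
#include <vector>

// Serves a fixed number of batches per epoch, then returns an empty
// optional so the dataloader knows the epoch has ended.
struct CountingDataset
    : torch::data::datasets::StatefulDataset<CountingDataset> {
  size_t batches_served_ = 0;
  static constexpr size_t kBatchesPerEpoch = 10;

  std::optional<std::vector<torch::data::Example<>>> get_batch(
      size_t batch_size) override {
    if (batches_served_ == kBatchesPerEpoch) {
      return std::nullopt;  // signals end of epoch
    }
    ++batches_served_;
    std::vector<torch::data::Example<>> batch;
    for (size_t i = 0; i < batch_size; ++i) {
      batch.emplace_back(torch::ones(4), torch::zeros(1));
    }
    return batch;
  }

  std::optional<size_t> size() const override {
    return kBatchesPerEpoch;
  }

  void reset() override {
    batches_served_ = 0;  // called at the start of every epoch
  }

  // The state here is trivial; a real dataset would persist its cursor.
  void save(torch::serialize::OutputArchive& /*archive*/) const override {}
  void load(torch::serialize::InputArchive& /*archive*/) override {}
};

int main() {}
```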
========================================================================================================================================================= SOURCE CODE FILE: tensor.h LINES: 1 SIZE: 0.94 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\datasets\tensor.h ENCODING: utf-8 ```h #pragma once #include <torch/data/datasets/base.h> #include <torch/data/example.h> #include <torch/types.h> #include <cstddef> #include <vector> namespace torch::data::datasets { /// A dataset of tensors. /// Stores a single tensor internally, which is then indexed inside `get()`. struct TensorDataset : public Dataset<TensorDataset, TensorExample> { /// Creates a `TensorDataset` from a vector of tensors. explicit TensorDataset(const std::vector<Tensor>& tensors) : TensorDataset(torch::stack(tensors)) {} explicit TensorDataset(torch::Tensor tensor) : tensor(std::move(tensor)) {} /// Returns a single `TensorExample`. TensorExample get(size_t index) override { return tensor[static_cast<int64_t>(index)]; } /// Returns the number of tensors in the dataset. std::optional<size_t> size() const override { return tensor.size(0); } Tensor tensor; }; } // namespace torch::data::datasets ```
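Usage is straightforward; note how the returned `TensorExample` converts implicitly to its underlying tensor:

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  torch::data::datasets::TensorDataset dataset(torch::rand({10, 3}));
  std::cout << dataset.size().value() << '\n';  // 10
  auto example = dataset.get(2);
  torch::Tensor& row = example;      // implicit conversion to the data tensor
  std::cout << row.sizes() << '\n';  // [3]
}
```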
============================================================================================================================================================= SOURCE CODE FILE: data_shuttle.h LINES: 1 SIZE: 2.61 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\detail\data_shuttle.h ENCODING: utf-8 ```h #pragma once #include <torch/data/detail/queue.h> #include <torch/types.h> #include <c10/util/Exception.h> #include <optional> #include <chrono> #include <utility> namespace torch::data::detail { /// Encapsulates the full life cycle of DataLoader jobs. /// /// When a new job is enqueued to the `DataShuttle`, a counter for in-flight /// jobs is bumped. This job is said to be "in-flight" until its result is /// popped. Worker threads dequeue jobs as soon as they are available. When a /// worker finishes a job, it enqueues the result. Only when the main thread /// dequeues a result is the count of in-flight jobs decremented. When the main /// thread attempts to dequeue a job but no jobs are in-flight, that means the /// epoch is complete and `pop_result` returns an empty optional. template <typename Job, typename Result> class DataShuttle { public: /// Pushes a new job. Called by the main thread. void push_job(Job job) { new_jobs_.push(std::move(job)); ++in_flight_jobs_; } /// Pushes the result of a job. Called by worker threads. void push_result(Result result) { results_.push(std::move(result)); } /// Returns the next job, blocking until there is one available. Called by /// worker threads. Job pop_job() { return new_jobs_.pop(); } /// Returns the result of a job, or nullopt if all jobs were exhausted. Called /// by the main thread. std::optional<Result> pop_result( std::optional<std::chrono::milliseconds> timeout = std::nullopt) { if (in_flight_jobs_ > 0) { auto result = results_.pop(timeout); --in_flight_jobs_; return result; } return std::nullopt; } /// Discards any jobs that are not yet in flight, and waits for all in-flight /// jobs to finish, discarding their result. void drain() { // Clear all inputs so that no further jobs are scheduled. auto number_cleared = new_jobs_.clear(); in_flight_jobs_ -= number_cleared; // Remove any outstanding results. while (in_flight_jobs_ > 0) { pop_result(); } } /// Returns the number of jobs that are still in progress. /// When this number is zero, an epoch is finished. size_t in_flight_jobs() const noexcept { return in_flight_jobs_; } private: /// The queue for jobs that are not yet in flight. Queue<Job> new_jobs_; /// The number of in-flight jobs. /// NOTE: Not atomic because only manipulated by the main thread. size_t in_flight_jobs_ = 0; /// The queue for results of finished jobs. Queue<Result> results_; }; } // namespace torch::data::detail ```
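`DataShuttle` is an implementation detail driven by the `DataLoader`, but its life cycle can be walked through single-threaded; in real use, `pop_job`/`push_result` run on worker threads:

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  torch::data::detail::DataShuttle<int, int> shuttle;
  shuttle.push_job(21);                        // main thread: schedule a job
  int job = shuttle.pop_job();                 // worker: dequeue it
  shuttle.push_result(job * 2);                // worker: publish the result
  std::cout << *shuttle.pop_result() << '\n';  // main thread: 42
  // Nothing is in flight anymore, so the next pop reports end of epoch.
  TORCH_CHECK(!shuttle.pop_result().has_value());
}
```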
====================================================================================================================================================== SOURCE CODE FILE: queue.h LINES: 1 SIZE: 2.48 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\detail\queue.h ENCODING: utf-8 ```h #pragma once #include <torch/types.h> #include <c10/util/Exception.h> #include <chrono> #include <condition_variable> #include <cstddef> #include <mutex> #include <queue> namespace torch::data::detail { /// A basic locked, blocking MPMC queue. /// /// Every `push` and `pop` is guarded by a mutex. A condition variable is used /// to communicate insertion of new elements, such that waiting threads will be /// woken up if they are currently waiting inside a call to `pop()`. /// /// Note that this data structure is written specifically for use with the /// `DataLoader`. Its behavior is tailored to this use case and may not be /// applicable to more general uses. template <typename T> class Queue { public: /// Pushes a new value to the back of the `Queue` and notifies one thread on /// the waiting side about this event. void push(T value) { { std::lock_guard<std::mutex> lock(mutex_); queue_.push(std::move(value)); } cv_.notify_one(); } /// Blocks until at least one element is ready to be popped from the front of /// the queue. An optional `timeout` (in milliseconds) can be used to limit the /// time spent waiting for an element. If the wait times out, an exception is /// raised. T pop(std::optional<std::chrono::milliseconds> timeout = std::nullopt) { std::unique_lock<std::mutex> lock(mutex_); if (timeout) { if (!cv_.wait_for( lock, *timeout, [this] { return !this->queue_.empty(); })) { // clang-format off TORCH_CHECK(false, "Timeout in DataLoader queue while waiting for next batch" " (timeout was ", timeout->count(), " ms)"); // clang-format on } } else { cv_.wait(lock, [this] { return !this->queue_.empty(); }); } AT_ASSERT(!queue_.empty()); T value = queue_.front(); queue_.pop(); lock.unlock(); return value; } /// Empties the queue and returns the number of elements that were present at /// the start of the function. No threads are notified about this event as it /// is assumed to be used to drain the queue during shutdown of a /// `DataLoader`. size_t clear() { std::lock_guard<std::mutex> lock(this->mutex_); const auto size = queue_.size(); while (!queue_.empty()) { queue_.pop(); } return size; } private: std::queue<T> queue_; std::mutex mutex_; std::condition_variable cv_; }; } // namespace torch::data::detail ```
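A small illustration of the blocking `pop`, here with a generous timeout so the call throws rather than hanging if the producer never delivers:

```cpp
#include <torch/torch.h>

#include <chrono>
#include <iostream>
#include <thread>

int main() {
  torch::data::detail::Queue<int> queue;
  std::thread producer([&] { queue.push(7); });
  // Blocks until the producer's push arrives (or the timeout expires).
  int value = queue.pop(std::chrono::milliseconds(5000));
  producer.join();
  std::cout << value << '\n';  // 7
}
```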
=========================================================================================================================================================== SOURCE CODE FILE: sequencers.h LINES: 1 SIZE: 4.44 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\detail\sequencers.h ENCODING: utf-8 ```h #pragma once #include <torch/types.h> #include <algorithm> #include <cstddef> #include <vector> namespace torch::data::detail::sequencers { namespace detail { template <typename Result> bool buffer_contains_result(const std::vector<std::optional<Result>>& buffer) { return std::any_of( buffer.begin(), buffer.end(), [](const std::optional<Result>& result) { return result.has_value(); }); } } // namespace detail /// A `Sequencer` accepts a function that yields the next result of a /// `DataLoader` and then has the opportunity to influence the order in which /// these results are returned. The `NoSequencer` does not enforce any /// sequencing and returns any result directly. The `OrderedSequencer` instead /// buffers results internally to return them in order of their sequence number. template <typename Result> struct Sequencer { using ResultProducer = std::function<std::optional<Result>()>; virtual ~Sequencer() = default; virtual std::optional<Result> next(ResultProducer next_result) = 0; }; /// A `Sequencer` that does not enforce any ordering. It is effectively the /// identity function. template <typename Result> struct NoSequencer final : public Sequencer<Result> { using typename Sequencer<Result>::ResultProducer; std::optional<Result> next(ResultProducer next_result) override { return next_result(); } }; /// A `Sequencer` that buffers results and returns them in order of their /// sequence number. The `OrderedSequencer` maintains an internal, monotonically /// incrementing counter for the next sequence number it expects. If it receives /// a result with a higher sequence number, it will buffer it for later (when /// the sequence number reaches that of this result). Otherwise, if the sequence /// numbers match, the result is returned. /// /// Implementation note: The `OrderedSequencer` is implemented with a fixed-size /// buffer. Let `m` be the maximum number of jobs in the data loader's queue and /// `s` be the current sequence number. Assume `m` jobs are scheduled in the /// `DataLoader`. Any new result is stored at index `job.sqn mod m` in the /// `OrderedSequencer`. Why are we sure sequence numbers of new jobs will not /// collide with sequence numbers of buffered jobs? The `OrderedSequencer` will /// not return from `next()` until it receives the result with sqn `s`. This /// means no new jobs can be scheduled in the `DataLoader` in the meantime, /// which enforces that as long as sqn `s` has not been received, `s + m` (which /// would cause a collision in the fixed-size buffer) will not yet be scheduled. template <typename Result> struct OrderedSequencer : public Sequencer<Result> { using typename Sequencer<Result>::ResultProducer; /// Constructs the `OrderedSequencer` with the maximum number of results it /// will ever hold at one point in time. explicit OrderedSequencer(size_t max_jobs) : buffer_(max_jobs) {} /// Buffers results until the next one in the expected order is received. std::optional<Result> next(ResultProducer next_result) override { // If we already have the result for the next sqn, return it. 
if (auto& maybe_result = buffer(next_sequence_number_)) { auto result = std::move(*maybe_result); buffer(next_sequence_number_++).reset(); return result; } // Otherwise wait for the next result. while (true) { auto result = next_result(); if (!result) { AT_ASSERT(!detail::buffer_contains_result(buffer_)); break; } // If it was not nullopt and the sequence numbers match, return it // directly and bump the sequence number. if (result->sequence_number == next_sequence_number_) { ++next_sequence_number_; return result; } // Stash the result for later. AT_ASSERT(!buffer(result->sequence_number).has_value()); buffer(result->sequence_number) = std::move(result); } // The result was an empty optional, so we are done with this epoch. return std::nullopt; } /// Accesses the buffer at the `index` modulo the buffer size. std::optional<Result>& buffer(size_t index) { return buffer_.at(index % buffer_.size()); } /// The monotonically increasing sequence number we expect. size_t next_sequence_number_ = 0; /// A fixed-size buffer (after construction). std::vector<std::optional<Result>> buffer_; }; } // namespace torch::data::detail::sequencers ```
================================================================================================================================================= SOURCE CODE FILE: example.h LINES: 1 SIZE: 1.31 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\example.h ENCODING: utf-8 ```h #pragma once #include <torch/types.h> namespace torch::data { /// An `Example` from a dataset. /// /// A dataset consists of data and an associated target (label). template <typename Data = at::Tensor, typename Target = at::Tensor> struct Example { using DataType = Data; using TargetType = Target; Example() = default; Example(Data data, Target target) : data(std::move(data)), target(std::move(target)) {} Data data; Target target; }; namespace example { using NoTarget = void; } // namespace example /// A specialization for `Example` that does not have a target. /// /// This class exists so that code can be written for a templated `Example` /// type, and work both for labeled and unlabeled datasets. template <typename Data> struct Example<Data, example::NoTarget> { using DataType = Data; using TargetType = example::NoTarget; Example() = default; /* implicit */ Example(Data data) : data(std::move(data)) {} // When a DataLoader returns an Example like this, that example should be // implicitly convertible to the underlying data type. operator Data&() { return data; } operator const Data&() const { return data; } Data data; }; using TensorExample = Example<at::Tensor, example::NoTarget>; } // namespace torch::data ```
================================================================================================================================================== SOURCE CODE FILE: iterator.h LINES: 1 SIZE: 5.35 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\iterator.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/utils/variadic.h> #include <torch/types.h> #include <c10/util/Exception.h> #include <functional> #include <iterator> #include <memory> #include <type_traits> #include <utility> namespace torch::data { namespace detail { // For increased safety and more separated logic, this implementation of // `Iterator` consists of a `ValidIterator` and a `SentinelIterator`. A // `ValidIterator` yields new batches until the `DataLoader` is exhausted. While // the `DataLoader` is not exhausted, `ValidIterator`s compare equal if they are // the same object. When the `ValidIterator` becomes exhausted, it compares // equal to the `SentinelIterator`, but not before. Half the code here is to // implement double dispatch for the comparison. Got damnit, C++. template <typename Batch> struct ValidIterator; template <typename Batch> struct SentinelIterator; /// Base class for the `ValidIterator` and `SentinelIterator` template <typename Batch> struct IteratorImpl { virtual ~IteratorImpl() = default; virtual void next() = 0; virtual Batch& get() = 0; virtual bool operator==(const IteratorImpl& other) const = 0; virtual bool operator==(const ValidIterator<Batch>& other) const = 0; virtual bool operator==(const SentinelIterator<Batch>& other) const = 0; }; template <typename Batch> struct ValidIterator : public IteratorImpl<Batch> { using BatchProducer = std::function<std::optional<Batch>()>; explicit ValidIterator(BatchProducer next_batch) : next_batch_(std::move(next_batch)) {} /// Fetches the next batch. void next() override { // If we didn't get the very first batch yet, get it now. lazy_initialize(); TORCH_CHECK( batch_.has_value(), "Attempted to increment iterator past the end"); // Increment to the next batch. batch_ = next_batch_(); } /// Returns the current batch. The precondition for this operation to not /// throw an exception is that it has been compared to the `SentinelIterator` /// and did not compare equal. Batch& get() override { // If we didn't get the very first batch yet, get it now. lazy_initialize(); TORCH_CHECK( batch_.has_value(), "Attempted to dereference iterator that was past the end"); return batch_.value(); } /// Does double dispatch. bool operator==(const IteratorImpl<Batch>& other) const override { return other == *this; } /// A `ValidIterator` is equal to the `SentinelIterator` iff. the /// `ValidIterator` has reached the end of the dataloader. bool operator==(const SentinelIterator<Batch>& /* unused */) const override { lazy_initialize(); return !batch_; } /// Returns true if the memory address of `other` equals that of `this`. bool operator==(const ValidIterator<Batch>& other) const override { return &other == this; } /// Gets the very first batch if it has not yet been fetched. 
void lazy_initialize() const { if (!initialized_) { batch_ = next_batch_(); initialized_ = true; } } BatchProducer next_batch_; mutable std::optional<Batch> batch_; mutable bool initialized_ = false; }; template <typename Batch> struct SentinelIterator : public IteratorImpl<Batch> { void next() override { TORCH_CHECK( false, "Incrementing the DataLoader's past-the-end iterator is not allowed"); } Batch& get() override { TORCH_CHECK( false, "Dereferencing the DataLoader's past-the-end iterator is not allowed"); } /// Does double dispatch. bool operator==(const IteratorImpl<Batch>& other) const override { return other == *this; } /// Calls the comparison operator between `ValidIterator` and /// `SentinelIterator`. bool operator==(const ValidIterator<Batch>& other) const override { return other == *this; } /// Sentinel iterators always compare equal. bool operator==(const SentinelIterator<Batch>& other) const override { return true; } }; } // namespace detail template <typename Batch> class Iterator { public: // Type aliases to make the class recognized as a proper iterator. using difference_type = std::ptrdiff_t; using value_type = Batch; using pointer = Batch*; using reference = Batch&; using iterator_category = std::input_iterator_tag; explicit Iterator(std::unique_ptr<detail::IteratorImpl<Batch>> impl) : impl_(std::move(impl)) {} /// Increments the iterator. /// Only permitted for valid iterators (not past the end). Iterator& operator++() { impl_->next(); return *this; } /// Returns the current batch. /// Only permitted for valid iterators (not past the end). Batch& operator*() { return impl_->get(); } /// Returns a pointer to the current batch. /// Only permitted for valid iterators (not past the end). Batch* operator->() { return &impl_->get(); } /// Compares two iterators for equality. bool operator==(const Iterator& other) const { return *impl_ == *other.impl_; } /// Compares two iterators for inequality. bool operator!=(const Iterator& other) const { return !(*this == other); } private: /// Points either to a `ValidIterator` or to a `SentinelIterator`. std::shared_ptr<detail::IteratorImpl<Batch>> impl_; }; } // namespace torch::data ```
================================================================================================================================================== SOURCE CODE FILE: samplers.h LINES: 1 SIZE: 0.32 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\samplers.h ENCODING: utf-8 ```h #pragma once #include <torch/data/samplers/base.h> #include <torch/data/samplers/custom_batch_request.h> #include <torch/data/samplers/distributed.h> #include <torch/data/samplers/random.h> #include <torch/data/samplers/sequential.h> #include <torch/data/samplers/serialize.h> #include <torch/data/samplers/stream.h> ```
======================================================================================================================================================= SOURCE CODE FILE: base.h LINES: 1 SIZE: 1.18 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\samplers\base.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <torch/types.h> #include <cstddef> #include <mutex> #include <vector> namespace torch::serialize { class OutputArchive; class InputArchive; } // namespace torch::serialize namespace torch::data::samplers { /// A `Sampler` is an object that yields an index with which to access a /// dataset. template <typename BatchRequest = std::vector<size_t>> class Sampler { public: using BatchRequestType = BatchRequest; virtual ~Sampler() = default; /// Resets the `Sampler`'s internal state. /// Typically called before a new epoch. /// Optionally, accepts a new size when resetting the sampler. virtual void reset(std::optional<size_t> new_size) = 0; /// Returns the next index if possible, or an empty optional if the /// sampler is exhausted for this epoch. virtual std::optional<BatchRequest> next(size_t batch_size) = 0; /// Serializes the `Sampler` to the `archive`. virtual void save(serialize::OutputArchive& archive) const = 0; /// Deserializes the `Sampler` from the `archive`. virtual void load(serialize::InputArchive& archive) = 0; }; } // namespace torch::data::samplers ```
======================================================================================================================================================================= SOURCE CODE FILE: custom_batch_request.h LINES: 1 SIZE: 0.51 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\samplers\custom_batch_request.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <cstddef> namespace torch::data::samplers { /// A base class for custom index types. struct TORCH_API CustomBatchRequest { CustomBatchRequest() = default; CustomBatchRequest(const CustomBatchRequest&) = default; CustomBatchRequest(CustomBatchRequest&&) noexcept = default; virtual ~CustomBatchRequest() = default; /// The number of elements accessed by this index. virtual size_t size() const = 0; }; } // namespace torch::data::samplers ```
============================================================================================================================================================== SOURCE CODE FILE: distributed.h LINES: 1 SIZE: 4.09 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\samplers\distributed.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <torch/data/samplers/base.h> #include <cstddef> #include <vector> namespace torch::serialize { class OutputArchive; class InputArchive; } // namespace torch::serialize namespace torch::data::samplers { /// A `Sampler` that selects a subset of indices to sample from and defines a /// sampling behavior. In a distributed setting, this selects a subset of the /// indices depending on the provided num_replicas and rank parameters. The /// `Sampler` performs a rounding operation based on the `allow_duplicates` /// parameter to decide the local sample count. template <typename BatchRequest = std::vector<size_t>> class DistributedSampler : public Sampler<BatchRequest> { public: DistributedSampler( size_t size, size_t num_replicas = 1, size_t rank = 0, bool allow_duplicates = true) : size_(size), num_replicas_(num_replicas), rank_(rank), allow_duplicates_(allow_duplicates) {} /// Set the epoch for the current enumeration. This can be used to alter the /// sample selection and shuffling behavior. void set_epoch(size_t epoch) { epoch_ = epoch; } size_t epoch() const { return epoch_; } protected: size_t local_sample_count() { if (allow_duplicates_) { return (size_ + num_replicas_ - 1) / num_replicas_; } else { return size_ / num_replicas_; } } // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) size_t size_; // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) size_t num_replicas_; // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) size_t rank_; // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) size_t epoch_{0}; // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) bool allow_duplicates_; }; /// Select samples randomly. The sampling order is shuffled at each `reset()` /// call. class TORCH_API DistributedRandomSampler : public DistributedSampler<> { public: DistributedRandomSampler( size_t size, size_t num_replicas = 1, size_t rank = 0, bool allow_duplicates = true); /// Resets the `DistributedRandomSampler` to a new set of indices. void reset(std::optional<size_t> new_size = std::nullopt) override; /// Returns the next batch of indices. std::optional<std::vector<size_t>> next(size_t batch_size) override; /// Serializes the `DistributedRandomSampler` to the `archive`. void save(serialize::OutputArchive& archive) const override; /// Deserializes the `DistributedRandomSampler` from the `archive`. void load(serialize::InputArchive& archive) override; /// Returns the current index of the `DistributedRandomSampler`. size_t index() const noexcept; private: void populate_indices(); size_t begin_index_; size_t end_index_; size_t sample_index_; std::vector<size_t> all_indices_; }; /// Select samples sequentially. class TORCH_API DistributedSequentialSampler : public DistributedSampler<> { public: DistributedSequentialSampler( size_t size, size_t num_replicas = 1, size_t rank = 0, bool allow_duplicates = true); /// Resets the `DistributedSequentialSampler` to a new set of indices. void reset(std::optional<size_t> new_size = std::nullopt) override; /// Returns the next batch of indices. 
std::optional<std::vector<size_t>> next(size_t batch_size) override; /// Serializes the `DistributedSequentialSampler` to the `archive`. void save(serialize::OutputArchive& archive) const override; /// Deserializes the `DistributedSequentialSampler` from the `archive`. void load(serialize::InputArchive& archive) override; /// Returns the current index of the `DistributedSequentialSampler`. size_t index() const noexcept; private: void populate_indices(); size_t begin_index_; size_t end_index_; size_t sample_index_; std::vector<size_t> all_indices_; }; } // namespace torch::data::samplers ```
========================================================================================================================================================= SOURCE CODE FILE: random.h LINES: 1 SIZE: 1.47 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\samplers\random.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <torch/data/samplers/base.h> #include <torch/types.h> #include <cstddef> #include <vector> namespace torch::serialize { class OutputArchive; class InputArchive; } // namespace torch::serialize namespace torch::data::samplers { /// A `Sampler` that returns random indices. class TORCH_API RandomSampler : public Sampler<> { public: /// Constructs a `RandomSampler` with a size and dtype for the stored indices. /// /// The constructor will eagerly allocate all required indices, which is the /// sequence `0 ... size - 1`. `index_dtype` is the data type of the stored /// indices. You can change it to influence memory usage. explicit RandomSampler(int64_t size, Dtype index_dtype = torch::kInt64); ~RandomSampler() override; /// Resets the `RandomSampler` to a new set of indices. void reset(std::optional<size_t> new_size = std::nullopt) override; /// Returns the next batch of indices. std::optional<std::vector<size_t>> next(size_t batch_size) override; /// Serializes the `RandomSampler` to the `archive`. void save(serialize::OutputArchive& archive) const override; /// Deserializes the `RandomSampler` from the `archive`. void load(serialize::InputArchive& archive) override; /// Returns the current index of the `RandomSampler`. size_t index() const noexcept; private: at::Tensor indices_; int64_t index_ = 0; }; } // namespace torch::data::samplers ```
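Basic usage; the global RNG is seeded here only to make the shuffle reproducible:

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  torch::manual_seed(0);
  torch::data::samplers::RandomSampler sampler(/*size=*/5);
  while (auto indices = sampler.next(/*batch_size=*/5)) {
    for (size_t i : *indices) {
      std::cout << i << ' ';  // 0..4 in shuffled order
    }
  }
  sampler.reset();  // reshuffles for the next epoch
}
```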
============================================================================================================================================================= SOURCE CODE FILE: sequential.h LINES: 1 SIZE: 1.21 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\samplers\sequential.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <torch/data/samplers/base.h> #include <torch/types.h> #include <cstddef> #include <vector> namespace torch::serialize { class OutputArchive; class InputArchive; } // namespace torch::serialize namespace torch::data::samplers { /// A `Sampler` that returns indices sequentially. class TORCH_API SequentialSampler : public Sampler<> { public: /// Creates a `SequentialSampler` that will return indices in the range /// `0...size - 1`. explicit SequentialSampler(size_t size); /// Resets the `SequentialSampler` to zero. void reset(std::optional<size_t> new_size = std::nullopt) override; /// Returns the next batch of indices. std::optional<std::vector<size_t>> next(size_t batch_size) override; /// Serializes the `SequentialSampler` to the `archive`. void save(serialize::OutputArchive& archive) const override; /// Deserializes the `SequentialSampler` from the `archive`. void load(serialize::InputArchive& archive) override; /// Returns the current index of the `SequentialSampler`. size_t index() const noexcept; private: size_t size_; size_t index_{0}; }; } // namespace torch::data::samplers ```
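The sequential counterpart is fully deterministic:

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  torch::data::samplers::SequentialSampler sampler(/*size=*/5);
  while (auto indices = sampler.next(/*batch_size=*/2)) {
    for (size_t i : *indices) {
      std::cout << i << ' ';  // 0 1 2 3 4
    }
  }
  sampler.reset();  // starts over from index 0
}
```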
============================================================================================================================================================ SOURCE CODE FILE: serialize.h LINES: 1 SIZE: 0.67 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\samplers\serialize.h ENCODING: utf-8 ```h #pragma once #include <torch/data/samplers/base.h> #include <torch/serialize/archive.h> namespace torch::data::samplers { /// Serializes a `Sampler` into an `OutputArchive`. template <typename BatchRequest> serialize::OutputArchive& operator<<( serialize::OutputArchive& archive, const Sampler<BatchRequest>& sampler) { sampler.save(archive); return archive; } /// Deserializes a `Sampler` from an `InputArchive`. template <typename BatchRequest> serialize::InputArchive& operator>>( serialize::InputArchive& archive, Sampler<BatchRequest>& sampler) { sampler.load(archive); return archive; } } // namespace torch::data::samplers ```
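Since `torch::save`/`torch::load` are built on these stream operators, a sampler's position can be checkpointed directly; `"sampler.pt"` is an arbitrary file name:

```cpp
#include <torch/torch.h>

int main() {
  torch::data::samplers::SequentialSampler sampler(/*size=*/100);
  sampler.next(/*batch_size=*/10);  // advance the cursor to index 10

  torch::save(sampler, "sampler.pt");

  torch::data::samplers::SequentialSampler restored(/*size=*/100);
  torch::load(restored, "sampler.pt");
  TORCH_CHECK(restored.index() == 10);  // the cursor round-trips
}
```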
========================================================================================================================================================= SOURCE CODE FILE: stream.h LINES: 1 SIZE: 1.98 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\samplers\stream.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <torch/data/samplers/base.h> #include <torch/data/samplers/custom_batch_request.h> #include <torch/types.h> #include <cstddef> namespace torch::serialize { class InputArchive; class OutputArchive; } // namespace torch::serialize namespace torch::data::samplers { /// A wrapper around a batch size value, which implements the /// `CustomBatchRequest` interface. struct TORCH_API BatchSize : public CustomBatchRequest { explicit BatchSize(size_t size); size_t size() const noexcept override; operator size_t() const noexcept; size_t size_; }; /// A sampler for (potentially infinite) streams of data. /// /// The major feature of the `StreamSampler` is that it does not return /// particular indices, but instead only the number of elements to fetch from /// the dataset. The dataset has to decide how to produce those elements. class TORCH_API StreamSampler : public Sampler<BatchSize> { public: /// Constructs the `StreamSampler` with the number of individual examples that /// should be fetched until the sampler is exhausted. explicit StreamSampler(size_t epoch_size); /// Resets the internal state of the sampler. void reset(std::optional<size_t> new_size = std::nullopt) override; /// Returns a `BatchSize` object with the number of elements to fetch in the /// next batch. This number is the minimum of the supplied `batch_size` and /// the difference between the `epoch_size` and the current index. If the /// `epoch_size` has been reached, returns an empty optional. std::optional<BatchSize> next(size_t batch_size) override; /// Serializes the `StreamSampler` to the `archive`. void save(serialize::OutputArchive& archive) const override; /// Deserializes the `StreamSampler` from the `archive`. void load(serialize::InputArchive& archive) override; private: size_t examples_retrieved_so_far_ = 0; size_t epoch_size_; }; } // namespace torch::data::samplers ```
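A short sketch: ten examples per epoch, fetched in chunks of at most four:

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  torch::data::samplers::StreamSampler sampler(/*epoch_size=*/10);
  while (auto batch_size = sampler.next(4)) {
    std::cout << static_cast<size_t>(*batch_size) << ' ';  // 4 4 2
  }
}
```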
==================================================================================================================================================== SOURCE CODE FILE: transforms.h LINES: 1 SIZE: 0.22 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\transforms.h ENCODING: utf-8 ```h #pragma once #include <torch/data/transforms/base.h> #include <torch/data/transforms/collate.h> #include <torch/data/transforms/lambda.h> #include <torch/data/transforms/stack.h> #include <torch/data/transforms/tensor.h> ```
========================================================================================================================================================= SOURCE CODE FILE: base.h LINES: 1 SIZE: 1.59 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\transforms\base.h ENCODING: utf-8 ```h #pragma once #include <torch/types.h> #include <utility> #include <vector> namespace torch::data::transforms { /// A transformation of a batch to a new batch. template <typename InputBatch, typename OutputBatch> class BatchTransform { public: using InputBatchType = InputBatch; using OutputBatchType = OutputBatch; virtual ~BatchTransform() = default; /// Applies the transformation to the given `input_batch`. virtual OutputBatch apply_batch(InputBatch input_batch) = 0; }; /// A transformation of individual input examples to individual output examples. /// /// Just like a `Dataset` is a `BatchDataset`, a `Transform` is a /// `BatchTransform` that can operate on the level of individual examples rather /// than entire batches. The batch-level transform is implemented (by default) /// in terms of the example-level transform, though this can be customized. template <typename Input, typename Output> class Transform : public BatchTransform<std::vector<Input>, std::vector<Output>> { public: using InputType = Input; using OutputType = Output; /// Applies the transformation to the given `input`. virtual OutputType apply(InputType input) = 0; /// Applies the `transformation` over the entire `input_batch`. std::vector<Output> apply_batch(std::vector<Input> input_batch) override { std::vector<Output> output_batch; output_batch.reserve(input_batch.size()); for (auto&& input : input_batch) { output_batch.push_back(apply(std::move(input))); } return output_batch; } }; } // namespace torch::data::transforms ```
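A minimal example-level transform (the name `Scale` is illustrative); the inherited `apply_batch` simply maps `apply` over every example:

```cpp
#include <torch/torch.h>

struct Scale : torch::data::transforms::Transform<torch::data::Example<>,
                                                  torch::data::Example<>> {
  torch::data::Example<> apply(torch::data::Example<> input) override {
    input.data = input.data * 2;  // double each data tensor
    return input;
  }
};

int main() {
  Scale transform;
  auto batch = transform.apply_batch(
      {{torch::ones(2), torch::zeros(1)}, {torch::ones(2), torch::zeros(1)}});
  TORCH_CHECK(batch.size() == 2);
  TORCH_CHECK(batch[0].data.eq(2).all().item<bool>());
}
```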
============================================================================================================================================================ SOURCE CODE FILE: collate.h LINES: 1 SIZE: 1.07 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\transforms\collate.h ENCODING: utf-8 ```h #pragma once #include <torch/data/example.h> #include <torch/data/transforms/lambda.h> #include <vector> namespace torch::data::transforms { /// A `Collation` is a transform that reduces a batch into a single value. /// The result is a `BatchDataset` that has the type of the single value as its /// `BatchType`. template <typename T, typename BatchType = std::vector<T>> using Collation = BatchTransform<BatchType, T>; /// A `Collate` allows passing a custom function to reduce/collate a batch /// into a single value. It's effectively the lambda version of `Collation`, /// which you could subclass and override `operator()` to achieve the same. /// /// \rst /// .. code-block:: cpp /// using namespace torch::data; /// /// auto dataset = datasets::MNIST("path/to/mnist") /// .map(transforms::Collate<Example<>>([](std::vector<Example<>> e) { /// return std::move(e.front()); /// })); /// \endrst template <typename T, typename BatchType = std::vector<T>> using Collate = BatchLambda<BatchType, T>; } // namespace torch::data::transforms ```
=========================================================================================================================================================== SOURCE CODE FILE: lambda.h LINES: 1 SIZE: 1.67 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\transforms\lambda.h ENCODING: utf-8 ```h #pragma once #include <torch/data/transforms/base.h> #include <functional> #include <utility> #include <vector> namespace torch::data::transforms { /// A `BatchTransform` that applies a user-provided functor to a batch. template <typename Input, typename Output = Input> class BatchLambda : public BatchTransform<Input, Output> { public: using typename BatchTransform<Input, Output>::InputBatchType; using typename BatchTransform<Input, Output>::OutputBatchType; using FunctionType = std::function<OutputBatchType(InputBatchType)>; /// Constructs the `BatchLambda` from the given `function` object. explicit BatchLambda(FunctionType function) : function_(std::move(function)) {} /// Applies the user-provided function object to the `input_batch`. OutputBatchType apply_batch(InputBatchType input_batch) override { return function_(std::move(input_batch)); } private: FunctionType function_; }; // A `Transform` that applies a user-provided functor to individual examples. template <typename Input, typename Output = Input> class Lambda : public Transform<Input, Output> { public: using typename Transform<Input, Output>::InputType; using typename Transform<Input, Output>::OutputType; using FunctionType = std::function<Output(Input)>; /// Constructs the `Lambda` from the given `function` object. explicit Lambda(FunctionType function) : function_(std::move(function)) {} /// Applies the user-provided function object to the `input`. OutputType apply(InputType input) override { return function_(std::move(input)); } private: FunctionType function_; }; } // namespace torch::data::transforms ```
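A sketch of the example-level `Lambda` in a `map` pipeline, again with a placeholder MNIST path:

```cpp
#include <torch/torch.h>

int main() {
  using torch::data::Example;
  auto dataset = torch::data::datasets::MNIST("path/to/mnist")
                     .map(torch::data::transforms::Lambda<Example<>>(
                         [](Example<> example) {
                           example.data = example.data.flatten();
                           return example;
                         }));
  auto batch = dataset.get_batch({0});
  // Each 1x28x28 image is now a flat 784-element vector.
  TORCH_CHECK(batch.front().data.dim() == 1);
  TORCH_CHECK(batch.front().data.numel() == 784);
}
```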
========================================================================================================================================================== SOURCE CODE FILE: stack.h LINES: 1 SIZE: 1.39 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\transforms\stack.h ENCODING: utf-8 ```h #pragma once #include <torch/data/example.h> #include <torch/data/transforms/collate.h> #include <torch/types.h> #include <utility> #include <vector> namespace torch::data::transforms { template <typename T = Example<>> struct Stack; /// A `Collation` for `Example<Tensor, Tensor>` types that stacks all data /// tensors into one tensor, and all target (label) tensors into one tensor. template <> struct Stack<Example<>> : public Collation<Example<>> { Example<> apply_batch(std::vector<Example<>> examples) override { std::vector<torch::Tensor> data, targets; data.reserve(examples.size()); targets.reserve(examples.size()); for (auto& example : examples) { data.push_back(std::move(example.data)); targets.push_back(std::move(example.target)); } return {torch::stack(data), torch::stack(targets)}; } }; /// A `Collation` for `Example<Tensor, NoTarget>` types that stacks all data /// tensors into one tensor. template <> struct Stack<TensorExample> : public Collation<Example<Tensor, example::NoTarget>> { TensorExample apply_batch(std::vector<TensorExample> examples) override { std::vector<torch::Tensor> data; data.reserve(examples.size()); for (auto& example : examples) { data.push_back(std::move(example.data)); } return torch::stack(data); } }; } // namespace torch::data::transforms ```
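After mapping `Stack<>` over a dataset, a batch request yields one stacked `Example<>` instead of a vector of examples (placeholder MNIST path again):

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  auto dataset = torch::data::datasets::MNIST("path/to/mnist")
                     .map(torch::data::transforms::Stack<>());
  auto batch = dataset.get_batch({0, 1, 2});
  std::cout << batch.data.sizes() << '\n';    // [3, 1, 28, 28]
  std::cout << batch.target.sizes() << '\n';  // [3]
}
```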
=========================================================================================================================================================== SOURCE CODE FILE: tensor.h LINES: 1 SIZE: 2.44 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\transforms\tensor.h ENCODING: utf-8 ```h #pragma once #include <torch/data/example.h> #include <torch/data/transforms/base.h> #include <torch/types.h> #include <functional> #include <utility> namespace torch::data::transforms { /// A `Transform` that is specialized for the typical `Example<Tensor, Tensor>` /// combination. It exposes a single `operator()` interface hook (for /// subclasses), and calls this function on input `Example` objects. template <typename Target = Tensor> class TensorTransform : public Transform<Example<Tensor, Target>, Example<Tensor, Target>> { public: using E = Example<Tensor, Target>; using typename Transform<E, E>::InputType; using typename Transform<E, E>::OutputType; /// Transforms a single input tensor to an output tensor. virtual Tensor operator()(Tensor input) = 0; /// Implementation of `Transform::apply` that calls `operator()`. OutputType apply(InputType input) override { input.data = (*this)(std::move(input.data)); return input; } }; /// A `Lambda` specialized for the typical `Example<Tensor, Tensor>` input type. template <typename Target = Tensor> class TensorLambda : public TensorTransform<Target> { public: using FunctionType = std::function<Tensor(Tensor)>; /// Creates a `TensorLambda` from the given `function`. explicit TensorLambda(FunctionType function) : function_(std::move(function)) {} /// Applies the user-provided functor to the input tensor. Tensor operator()(Tensor input) override { return function_(std::move(input)); } private: FunctionType function_; }; /// Normalizes input tensors by subtracting the supplied mean and dividing by /// the given standard deviation. template <typename Target = Tensor> struct Normalize : public TensorTransform<Target> { /// Constructs a `Normalize` transform. The mean and standard deviation can be /// anything that is broadcastable over the input tensors (like single /// scalars). Normalize(ArrayRef<double> mean, ArrayRef<double> stddev) : mean(torch::tensor(mean, torch::kFloat32) .unsqueeze(/*dim=*/1) .unsqueeze(/*dim=*/2)), stddev(torch::tensor(stddev, torch::kFloat32) .unsqueeze(/*dim=*/1) .unsqueeze(/*dim=*/2)) {} torch::Tensor operator()(Tensor input) override { return input.sub(mean).div(stddev); } torch::Tensor mean, stddev; }; } // namespace torch::data::transforms ```
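`Normalize` composes naturally with `Stack` in a `map` pipeline; the statistics below are the commonly used MNIST mean/stddev, and the path is a placeholder:

```cpp
#include <torch/torch.h>

int main() {
  auto dataset =
      torch::data::datasets::MNIST("path/to/mnist")
          .map(torch::data::transforms::Normalize<>(/*mean=*/0.1307,
                                                    /*stddev=*/0.3081))
          .map(torch::data::transforms::Stack<>());
  auto batch = dataset.get_batch({0, 1});
  TORCH_CHECK(batch.data.size(0) == 2);  // two normalized, stacked images
}
```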
========================================================================================================================================================== SOURCE CODE FILE: worker_exception.h LINES: 1 SIZE: 1.13 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\data\worker_exception.h ENCODING: utf-8 ```h #pragma once #include <exception> #include <string> #include <utility> namespace torch::data { /// An exception thrown when a DataLoader's worker thread throws an exception, /// which is caught. A `WorkerException` stores an `exception_ptr` to the /// original exception thrown in the worker thread. struct WorkerException : public std::exception { /// Constructs a `WorkerException` from an `exception_ptr`. explicit WorkerException(std::exception_ptr original) // NOLINTNEXTLINE(bugprone-throw-keyword-missing) : original_exception(std::move(original)), message("Caught exception in DataLoader worker thread.") { try { std::rethrow_exception(original_exception); } catch (std::exception& e) { message += " Original message: "; message += e.what(); } } const char* what() const noexcept override { return message.c_str(); } /// The original exception thrown in the worker thread. std::exception_ptr original_exception; /// This exception's message (not the original exception's message). std::string message; }; } // namespace torch::data ```
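The wrapping can be simulated without a dataloader; the exception text below mimics what a failing worker would produce:

```cpp
#include <torch/torch.h>

#include <iostream>
#include <stdexcept>

int main() {
  try {
    try {
      throw std::runtime_error("corrupt sample at index 3");
    } catch (...) {
      // What a worker thread does with any exception it catches.
      throw torch::data::WorkerException(std::current_exception());
    }
  } catch (const torch::data::WorkerException& e) {
    std::cout << e.what() << '\n';
    // Caught exception in DataLoader worker thread. Original message:
    // corrupt sample at index 3
  }
}
```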
=============================================================================================================================================================== SOURCE CODE FILE: TensorDataContainer.h LINES: 1 SIZE: 12.99 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\detail\TensorDataContainer.h ENCODING: utf-8 ```h #pragma once #include <ATen/Dispatch.h> #include <ATen/ScalarOps.h> #include <ATen/core/Tensor.h> #include <ATen/core/grad_mode.h> #include <c10/util/irange.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/tensor.h> #endif #include <initializer_list> namespace torch::detail { enum class TensorDataContainerType { Scalar, InitList, Tensor }; struct TensorDataContainer; inline std::ostream& operator<<( std::ostream& stream, const TensorDataContainer& tensor_data_container); inline c10::ScalarType compute_desired_dtype(c10::ScalarType scalar_type) { if (scalar_type == at::kInt || scalar_type == at::kLong) { // C++ `torch::tensor` with an integer type or an `at::ArrayRef` / // `std::vector` / (nested) braced-init-list of integer types always // produces a tensor of dtype `at::kLong` (aka. int64_t), matching Python // `torch.tensor` behavior. return at::kLong; } else if (scalar_type == at::kFloat || scalar_type == at::kDouble) { // C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / // `std::vector` / (nested) braced-init-list of floating-point types always // produces a tensor of dtype `torch::get_default_dtype()`, matching Python // `torch.tensor` behavior. return at::typeMetaToScalarType(at::get_default_dtype()); } else { return scalar_type; } } // We use `TensorDataContainer` to support converting the following data // container types into the equivalent Tensor: // // 1. Arbitrarily nested braced-init-list (e.g. `{{1, 2}, {3, 4}}`). // 2. `at::ArrayRef` of supported tensor data types. // 3. `std::vector` of supported tensor data types. // // At any time, a `TensorDataContainer` object represents one of the following: // // 1. A scalar with value `scalar()` and type `scalar_type()`. // 2. A Tensor represented in `std::initializer_list<TensorDataContainer>` form, // with value `init_list()`, Tensor scalar type `scalar_type()`, and Tensor // sizes `sizes()`. // 3. A Tensor represented in `at::Tensor` form, with value `tensor()`, scalar // type `scalar_type()`, // and Tensor sizes `sizes()`. // // All the infrastructure here is mostly to support converting an arbitrarily // nested braced-init-list to the equivalent Tensor successfully. Consider the // following example: // // `torch::tensor({{1}, {2}})` // // this will call into the `torch::tensor` function: // // `at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const // at::TensorOptions& options = {})` // // the compiler will first try to convert `{{1}, {2}}` to `TensorDataContainer` // type: // // `TensorDataContainer({{1}, {2}})` // // which matches to the // `TensorDataContainer(std::initializer_list<TensorDataContainer>)` // constructor, and in an attempt to convert `{1}` and `{2}` to // `TensorDataContainer`, it calls the following: // // `TensorDataContainer({1})` (same call path happens for `{2}`, and we'll just // focus on `{1}` here) // // At this point, theoretically there are two plausible ways for `{1}` to be // matched to one of the constructors of `TensorDataContainer`: // // 1. 
It can be a list-initialization of a scalar value, thus matching // `TensorDataContainer(int value)`. // 2. It can be converted to `std::initializer_list<TensorDataContainer>`, thus // matching // `TensorDataContainer(std::initializer_list<TensorDataContainer>)`. // // How does the compiler decide which one to choose? According to // `https://en.cppreference.com/w/cpp/language/list_initialization`, // braced-init-list always prefers the constructor that takes // `std::initializer_list`. Hence we happily move forward with constructor #2, // and it calls the following: // // `TensorDataContainer(1)` // // Now it matches `TensorDataContainer(int value)`, which stores `1` as a scalar // value. All is good. struct TensorDataContainer { // NOTE: For tensors with zero-size dimensions (e.g. `torch::tensor({{}, // {}})`), the innermost empty braced-init-list `{}` matches the default // constructor of the innermost `TensorDataContainer`. TensorDataContainer() : sizes_({0}), // NOTE: In Python, the dtype of tensors with zero-size dimensions (e.g. // `torch.tensor([[], []])`) depends on the value of // `torch.get_default_dtype()`, and we should do the same for the C++ // equivalent. scalar_type_(at::typeMetaToScalarType(at::get_default_dtype())), type_(TensorDataContainerType::InitList) {} #define TENSOR(T, S) \ TensorDataContainer(T value) \ : scalar_type_(at::k##S), \ type_(TensorDataContainerType::Scalar), \ scalar_(value) {} AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR) AT_FORALL_COMPLEX_TYPES(TENSOR) #undef TENSOR TensorDataContainer(std::initializer_list<TensorDataContainer> init_list) : scalar_type_(init_list.begin()->scalar_type()), type_(TensorDataContainerType::InitList), init_list_(init_list) { const TensorDataContainer& first_elem = *(init_list.begin()); for (const auto& elem : init_list) { TORCH_CHECK( elem.sizes() == first_elem.sizes(), "Expected all sub-lists to have sizes: ", first_elem.sizes(), " (e.g. ", first_elem, "), ", "but got sub-list ", elem, " with sizes: ", elem.sizes()); TORCH_CHECK( elem.scalar_type() == first_elem.scalar_type(), "Expected all elements of the tensor to have the same scalar type: ", first_elem.scalar_type(), ", but got element of scalar type: ", elem.scalar_type()); } sizes_.reserve(first_elem.sizes().size() + 1); sizes_.push_back(static_cast<int64_t>(init_list.size())); sizes_.insert( sizes_.end(), first_elem.sizes().begin(), first_elem.sizes().end()); } #define TENSOR(T, S) \ TensorDataContainer(at::ArrayRef<T> values) \ : sizes_({(int64_t)values.size()}), \ scalar_type_(at::k##S), \ type_(TensorDataContainerType::Tensor) { \ at::AutoDispatchBelowAutograd mode; \ if (scalar_type_ == at::kBool) { \ tensor_ = at::tensor(values, at::TensorOptions().device(at::kCPU)); \ } else { \ tensor_ = at::tensor(values, at::dtype(scalar_type_).device(at::kCPU)); \ } \ } AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR) AT_FORALL_COMPLEX_TYPES(TENSOR) #undef TENSOR // NOTE: We need to handle `std::vector` explicitly instead of relying on an // implicit conversion to `at::ArrayRef`, otherwise the following error can be // thrown when calling `torch::tensor(std::vector<int>({1, 2}))`: // ``` // error: no matching function for call to 'tensor(const std::vector<int>&)' // no known conversion for argument 1 from 'const std::vector<int>' to // 'torch::detail::TensorDataContainer' // ``` // // NOTE: `torch::tensor(std::vector<bool>)` is not supported for now, because // ArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield. 
#define TENSOR(T, S) \ TensorDataContainer(const std::vector<T>& values) \ : TensorDataContainer(at::ArrayRef<T>(values)) {} AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TENSOR) AT_FORALL_COMPLEX_TYPES(TENSOR) #undef TENSOR bool is_scalar() const { return type_ == TensorDataContainerType::Scalar; } const c10::Scalar& scalar() const { TORCH_CHECK( is_scalar(), "Can only call `scalar()` on a TensorDataContainer that has `is_scalar() == true`"); return scalar_; } bool is_init_list() const { return type_ == TensorDataContainerType::InitList; } const std::initializer_list<TensorDataContainer>& init_list() const { TORCH_CHECK( is_init_list(), "Can only call `init_list()` on a TensorDataContainer that has `is_init_list() == true`"); return init_list_; } bool is_tensor() const { return type_ == TensorDataContainerType::Tensor; } const at::Tensor& tensor() const { TORCH_CHECK( is_tensor(), "Can only call `tensor()` on a TensorDataContainer that has `is_tensor() == true`"); return tensor_; } const std::vector<int64_t>& sizes() const { return sizes_; } const c10::ScalarType& scalar_type() const { return scalar_type_; } at::Tensor convert_to_tensor(at::TensorOptions options) const { if (!options.has_dtype()) { options = options.dtype(compute_desired_dtype(scalar_type_)); } if (is_scalar()) { at::AutoDispatchBelowAutograd mode; return at::scalar_tensor(scalar_, options); } else if (is_init_list()) { // NOTE: Here we explicitly choose to initialize the tensor on CPU first, // fill each element of the tensor, and then move the tensor to the // desired device. For CUDA device, this approach only involves 1 CUDA // kernel launch, and is much faster than initializing the tensor on CUDA // first and then filling each element of it (which involves `N` CUDA // kernel launches where `N` is the number of the elements in the tensor). 
at::Tensor tensor = ([&]() { at::AutoDispatchBelowAutograd mode; return at::empty(sizes_, options.device(at::kCPU)); })(); fill_tensor(tensor); return tensor.to(options.device()); } else if (is_tensor()) { auto output = tensor_.to(options); TORCH_CHECK( !tensor_.is_complex() || output.is_complex(), "can not do torch::tensor(complex, dtype=non-complex) because complex can not be casted to real number without loss of information"); return output; } else { TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type"); } } void pretty_print_recursive(std::ostream& stream) const { if (is_scalar()) { AT_DISPATCH_ALL_TYPES_AND3( at::kBool, at::kHalf, at::kBFloat16, scalar_type_, "TensorDataContainer_pretty_print_scalar", [&] { stream << scalar_.to<scalar_t>(); }); } else if (is_init_list()) { stream << "{"; for (const TensorDataContainer* it = init_list_.begin(); it != init_list_.end(); it++) { stream << *it; if (std::next(it) != init_list_.end()) stream << ", "; } stream << "}"; } else if (is_tensor()) { stream << "{"; for (const auto i : c10::irange(tensor_.sizes()[0])) { AT_DISPATCH_ALL_TYPES_AND3( at::kBool, at::kHalf, at::kBFloat16, scalar_type_, "TensorDataContainer_pretty_print_tensor_item", [&] { stream << tensor_[i].item<scalar_t>(); }); if (i != tensor_.sizes()[0] - 1) stream << ", "; } stream << "}"; } else { TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type"); } } private: void fill_tensor(at::Tensor& tensor) const { if (is_scalar()) { TORCH_INTERNAL_ASSERT( tensor.dim() == 0, "Expected a 0-dim Tensor, but got Tensor with dimensions: ", tensor.dim()); at::NoGradGuard guard; tensor.fill_(scalar_); } else if (is_init_list()) { TORCH_INTERNAL_ASSERT( tensor.sizes()[0] == (int64_t)init_list_.size(), "Expected a Tensor with size ", init_list_.size(), " in its first dimension, but got Tensor with size ", tensor.sizes()[0], " in its first dimension"); int64_t index = 0; for (const auto& elem : init_list_) { at::Tensor slice = tensor[index]; elem.fill_tensor(slice); index++; } } else if (is_tensor()) { TORCH_INTERNAL_ASSERT( false, "TensorDataContainer is already a Tensor type, `fill_tensor` should not be called"); } else { TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type"); } } std::vector<int64_t> sizes_; c10::ScalarType scalar_type_; TensorDataContainerType type_; c10::Scalar scalar_; std::initializer_list<TensorDataContainer> init_list_; at::Tensor tensor_; }; inline std::ostream& operator<<( std::ostream& stream, const TensorDataContainer& tensor_data_container) { tensor_data_container.pretty_print_recursive(stream); return stream; } } // namespace torch::detail ```
================================================================================================================================================== SOURCE CODE FILE: static.h LINES: 1 SIZE: 2.11 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\detail\static.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/utils/variadic.h>
#include <torch/types.h>

#include <cstdint>
#include <type_traits>

namespace torch::nn {
class Module;
} // namespace torch::nn

namespace torch::detail {

/// Detects if a type T has a forward() method.
template <typename T>
struct has_forward {
  // Declare two types with differing size.
  using yes = int8_t;
  using no = int16_t;

  // Here we declare two functions. The first is only enabled if `&U::forward`
  // is well-formed and returns the `yes` type. In C++, the ellipsis parameter
  // type (`...`) always puts the function at the bottom of overload
  // resolution. This is specified in the standard as: 1) A standard conversion
  // sequence is always better than a user-defined conversion sequence or an
  // ellipsis conversion sequence. 2) A user-defined conversion sequence is
  // always better than an ellipsis conversion sequence. This means that if the
  // first overload is viable, it will be preferred over the second as long as
  // we pass any convertible type. The type of `&U::forward` is a pointer type,
  // so we can pass e.g. 0.
  template <typename U>
  static yes test(decltype(&U::forward));
  template <typename U>
  static no test(...);

  // Finally we test statically whether the size of the type returned by the
  // selected overload is the size of the `yes` type.
  static constexpr bool value = (sizeof(test<T>(nullptr)) == sizeof(yes));
};

template <typename Head = void, typename... Tail>
constexpr bool check_not_lvalue_references() {
  return (!std::is_lvalue_reference_v<Head> ||
          std::is_const_v<std::remove_reference_t<Head>>) &&
      check_not_lvalue_references<Tail...>();
}

template <>
inline constexpr bool check_not_lvalue_references<void>() {
  return true;
}

/// A type trait whose `value` member is true if `M` derives from `Module`.
template <typename M>
using is_module = std::is_base_of<torch::nn::Module, std::decay_t<M>>;

template <typename M, typename T = void>
using enable_if_module_t = std::enable_if_t<is_module<M>::value, T>;

} // namespace torch::detail
```
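A compile-time sketch of how these traits behave; `MyNet` is an illustrative name, not part of the header.

```cpp
#include <torch/torch.h>

struct MyNet : torch::nn::Module {
  torch::Tensor forward(torch::Tensor x) {
    return torch::relu(x);
  }
};

// has_forward resolves test<U>(decltype(&U::forward)) when &U::forward is
// well-formed; otherwise only the ellipsis overload is viable and value is
// false. is_module simply checks derivation from torch::nn::Module.
static_assert(torch::detail::has_forward<MyNet>::value, "MyNet has forward()");
static_assert(!torch::detail::has_forward<int>::value, "int has no forward()");
static_assert(torch::detail::is_module<MyNet>::value, "derives from Module");
```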
========================================================================================================================================= SOURCE CODE FILE: enum.h LINES: 1 SIZE: 7.48 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\enum.h ENCODING: utf-8 ```h #pragma once #include <string> #include <variant> #include <ATen/core/Reduction.h> #include <c10/util/Exception.h> #include <torch/csrc/Export.h> #define TORCH_ENUM_DECLARE(name) \ namespace torch { \ namespace enumtype { \ /* \ NOTE: We need to provide the default constructor for each struct, \ otherwise Clang 3.8 would complain: \ ``` \ error: default initialization of an object of const type 'const \ enumtype::Enum1' without a user-provided default constructor \ ``` \ */ \ struct k##name { \ k##name() {} \ }; \ } \ TORCH_API extern const enumtype::k##name k##name; \ } #define TORCH_ENUM_DEFINE(name) \ namespace torch { \ const enumtype::k##name k##name; \ } #define TORCH_ENUM_PRETTY_PRINT(name) \ std::string operator()(const enumtype::k##name& v [[maybe_unused]]) const { \ std::string k("k"); \ return k + #name; \ } // NOTE: Backstory on why we need the following two macros: // // Consider the following options class: // // ``` // struct TORCH_API SomeOptions { // typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum> // reduction_t; SomeOptions(reduction_t reduction = torch::kMean) : // reduction_(reduction) {} // // TORCH_ARG(reduction_t, reduction); // }; // ``` // // and the functional that uses it: // // ``` // Tensor some_functional( // const Tensor& input, // SomeOptions options = {}) { // ... // } // ``` // // Normally, we would expect this to work: // // `F::some_functional(input, torch::kNone)` // // However, it throws the following error instead: // // ``` // error: could not convert `torch::kNone` from `const torch::enumtype::kNone` // to `torch::nn::SomeOptions` // ``` // // To get around this problem, we explicitly provide the following constructors // for `SomeOptions`: // // ``` // SomeOptions(torch::enumtype::kNone reduction) : reduction_(torch::kNone) {} // SomeOptions(torch::enumtype::kMean reduction) : reduction_(torch::kMean) {} // SomeOptions(torch::enumtype::kSum reduction) : reduction_(torch::kSum) {} // ``` // // so that the conversion from `torch::kNone` to `SomeOptions` would work. // // Note that we also provide the default constructor `SomeOptions() {}`, so that // `SomeOptions options = {}` can work. 
#define TORCH_OPTIONS_CTOR_VARIANT_ARG3( \ OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3) \ OPTIONS_NAME() = default; \ OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \ OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \ OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} #define TORCH_OPTIONS_CTOR_VARIANT_ARG4( \ OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3, TYPE4) \ OPTIONS_NAME() = default; \ OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \ OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \ OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} \ OPTIONS_NAME(torch::enumtype::TYPE4 ARG_NAME) : ARG_NAME##_(torch::TYPE4) {} TORCH_ENUM_DECLARE(Linear) TORCH_ENUM_DECLARE(Conv1D) TORCH_ENUM_DECLARE(Conv2D) TORCH_ENUM_DECLARE(Conv3D) TORCH_ENUM_DECLARE(ConvTranspose1D) TORCH_ENUM_DECLARE(ConvTranspose2D) TORCH_ENUM_DECLARE(ConvTranspose3D) TORCH_ENUM_DECLARE(Sigmoid) TORCH_ENUM_DECLARE(Tanh) TORCH_ENUM_DECLARE(ReLU) TORCH_ENUM_DECLARE(GELU) TORCH_ENUM_DECLARE(SiLU) TORCH_ENUM_DECLARE(Mish) TORCH_ENUM_DECLARE(LeakyReLU) TORCH_ENUM_DECLARE(FanIn) TORCH_ENUM_DECLARE(FanOut) TORCH_ENUM_DECLARE(Constant) TORCH_ENUM_DECLARE(Reflect) TORCH_ENUM_DECLARE(Replicate) TORCH_ENUM_DECLARE(Circular) TORCH_ENUM_DECLARE(Nearest) TORCH_ENUM_DECLARE(Bilinear) TORCH_ENUM_DECLARE(Bicubic) TORCH_ENUM_DECLARE(Trilinear) TORCH_ENUM_DECLARE(Area) TORCH_ENUM_DECLARE(NearestExact) TORCH_ENUM_DECLARE(Sum) TORCH_ENUM_DECLARE(Mean) TORCH_ENUM_DECLARE(Max) TORCH_ENUM_DECLARE(None) TORCH_ENUM_DECLARE(BatchMean) TORCH_ENUM_DECLARE(Zeros) TORCH_ENUM_DECLARE(Border) TORCH_ENUM_DECLARE(Reflection) TORCH_ENUM_DECLARE(RNN_TANH) TORCH_ENUM_DECLARE(RNN_RELU) TORCH_ENUM_DECLARE(LSTM) TORCH_ENUM_DECLARE(GRU) TORCH_ENUM_DECLARE(Valid) TORCH_ENUM_DECLARE(Same) namespace torch::enumtype { struct _compute_enum_name { TORCH_ENUM_PRETTY_PRINT(Linear) TORCH_ENUM_PRETTY_PRINT(Conv1D) TORCH_ENUM_PRETTY_PRINT(Conv2D) TORCH_ENUM_PRETTY_PRINT(Conv3D) TORCH_ENUM_PRETTY_PRINT(ConvTranspose1D) TORCH_ENUM_PRETTY_PRINT(ConvTranspose2D) TORCH_ENUM_PRETTY_PRINT(ConvTranspose3D) TORCH_ENUM_PRETTY_PRINT(Sigmoid) TORCH_ENUM_PRETTY_PRINT(Tanh) TORCH_ENUM_PRETTY_PRINT(ReLU) TORCH_ENUM_PRETTY_PRINT(GELU) TORCH_ENUM_PRETTY_PRINT(SiLU) TORCH_ENUM_PRETTY_PRINT(Mish) TORCH_ENUM_PRETTY_PRINT(LeakyReLU) TORCH_ENUM_PRETTY_PRINT(FanIn) TORCH_ENUM_PRETTY_PRINT(FanOut) TORCH_ENUM_PRETTY_PRINT(Constant) TORCH_ENUM_PRETTY_PRINT(Reflect) TORCH_ENUM_PRETTY_PRINT(Replicate) TORCH_ENUM_PRETTY_PRINT(Circular) TORCH_ENUM_PRETTY_PRINT(Nearest) TORCH_ENUM_PRETTY_PRINT(Bilinear) TORCH_ENUM_PRETTY_PRINT(Bicubic) TORCH_ENUM_PRETTY_PRINT(Trilinear) TORCH_ENUM_PRETTY_PRINT(Area) TORCH_ENUM_PRETTY_PRINT(NearestExact) TORCH_ENUM_PRETTY_PRINT(Sum) TORCH_ENUM_PRETTY_PRINT(Mean) TORCH_ENUM_PRETTY_PRINT(Max) TORCH_ENUM_PRETTY_PRINT(None) TORCH_ENUM_PRETTY_PRINT(BatchMean) TORCH_ENUM_PRETTY_PRINT(Zeros) TORCH_ENUM_PRETTY_PRINT(Border) TORCH_ENUM_PRETTY_PRINT(Reflection) TORCH_ENUM_PRETTY_PRINT(RNN_TANH) TORCH_ENUM_PRETTY_PRINT(RNN_RELU) TORCH_ENUM_PRETTY_PRINT(LSTM) TORCH_ENUM_PRETTY_PRINT(GRU) TORCH_ENUM_PRETTY_PRINT(Valid) TORCH_ENUM_PRETTY_PRINT(Same) }; template <typename V> std::string get_enum_name(V variant_enum) { return std::visit(enumtype::_compute_enum_name{}, variant_enum); } template <typename V> at::Reduction::Reduction reduction_get_enum(V variant_enum) { if (std::holds_alternative<enumtype::kNone>(variant_enum)) { 
return at::Reduction::None; } else if (std::holds_alternative<enumtype::kMean>(variant_enum)) { return at::Reduction::Mean; } else if (std::holds_alternative<enumtype::kSum>(variant_enum)) { return at::Reduction::Sum; } else { TORCH_CHECK( false, get_enum_name(variant_enum), " is not a valid value for reduction"); return at::Reduction::END; } } } // namespace torch::enumtype ```
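To make the variant machinery above concrete, here is a minimal sketch (variable names are illustrative). It uses the same variant-over-tag-types pattern described in the `SomeOptions` backstory note.

```cpp
#include <torch/torch.h>
#include <iostream>
#include <variant>

int main() {
  // The reduction_t pattern from the NOTE above: a variant over the enum tag
  // types, assignable directly from torch::kMean / torch::kSum / torch::kNone.
  std::variant<torch::enumtype::kNone, torch::enumtype::kMean,
               torch::enumtype::kSum>
      reduction = torch::kMean;

  std::cout << torch::enumtype::get_enum_name(reduction) << '\n'; // "kMean"

  // Maps the variant onto ATen's Reduction enum (Mean here).
  auto r = torch::enumtype::reduction_get_enum(reduction);
  (void)r;
  return 0;
}
```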
==================================================================================================================================================== SOURCE CODE FILE: expanding_array.h LINES: 1 SIZE: 6.69 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\expanding_array.h ENCODING: utf-8 ```h #pragma once #include <c10/util/ArrayRef.h> #include <c10/util/Exception.h> #include <c10/util/irange.h> #include <optional> #include <algorithm> #include <array> #include <cstdint> #include <initializer_list> #include <string> #include <vector> namespace torch { /// A utility class that accepts either a container of `D`-many values, or a /// single value, which is internally repeated `D` times. This is useful to /// represent parameters that are multidimensional, but often equally sized in /// all dimensions. For example, the kernel size of a 2D convolution has an `x` /// and `y` length, but `x` and `y` are often equal. In such a case you could /// just pass `3` to an `ExpandingArray<2>` and it would "expand" to `{3, 3}`. template <size_t D, typename T = int64_t> class ExpandingArray { public: /// Constructs an `ExpandingArray` from an `initializer_list`. The extent of /// the length is checked against the `ExpandingArray`'s extent parameter `D` /// at runtime. /*implicit*/ ExpandingArray(std::initializer_list<T> list) : ExpandingArray(c10::ArrayRef<T>(list)) {} /// Constructs an `ExpandingArray` from an `std::vector`. The extent of /// the length is checked against the `ExpandingArray`'s extent parameter `D` /// at runtime. /*implicit*/ ExpandingArray(std::vector<T> vec) : ExpandingArray(c10::ArrayRef<T>(vec)) {} /// Constructs an `ExpandingArray` from an `c10::ArrayRef`. The extent of /// the length is checked against the `ExpandingArray`'s extent parameter `D` /// at runtime. /*implicit*/ ExpandingArray(c10::ArrayRef<T> values) { // clang-format off TORCH_CHECK( values.size() == D, "Expected ", D, " values, but instead got ", values.size()); // clang-format on std::copy(values.begin(), values.end(), values_.begin()); } /// Constructs an `ExpandingArray` from a single value, which is repeated `D` /// times (where `D` is the extent parameter of the `ExpandingArray`). /*implicit*/ ExpandingArray(T single_size) { values_.fill(single_size); } /// Constructs an `ExpandingArray` from a correctly sized `std::array`. /*implicit*/ ExpandingArray(const std::array<T, D>& values) : values_(values) {} /// Accesses the underlying `std::array`. std::array<T, D>& operator*() { return values_; } /// Accesses the underlying `std::array`. const std::array<T, D>& operator*() const { return values_; } /// Accesses the underlying `std::array`. std::array<T, D>* operator->() { return &values_; } /// Accesses the underlying `std::array`. const std::array<T, D>* operator->() const { return &values_; } /// Returns an `ArrayRef` to the underlying `std::array`. operator c10::ArrayRef<T>() const { return values_; } /// Returns the extent of the `ExpandingArray`. size_t size() const noexcept { return D; } protected: /// The backing array. 
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) std::array<T, D> values_; }; template <size_t D, typename T> std::ostream& operator<<( std::ostream& stream, const ExpandingArray<D, T>& expanding_array) { if (expanding_array.size() == 1) { return stream << expanding_array->at(0); } return stream << static_cast<c10::ArrayRef<T>>(expanding_array); } /// A utility class that accepts either a container of `D`-many /// `std::optional<T>` values, or a single `std::optional<T>` value, which is /// internally repeated `D` times. It has the additional ability to accept /// containers of the underlying type `T` and convert them to a container of /// `std::optional<T>`. template <size_t D, typename T = int64_t> class ExpandingArrayWithOptionalElem : public ExpandingArray<D, std::optional<T>> { public: using ExpandingArray<D, std::optional<T>>::ExpandingArray; /// Constructs an `ExpandingArrayWithOptionalElem` from an `initializer_list` /// of the underlying type `T`. The extent of the length is checked against /// the `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. /*implicit*/ ExpandingArrayWithOptionalElem(std::initializer_list<T> list) : ExpandingArrayWithOptionalElem(c10::ArrayRef<T>(list)) {} /// Constructs an `ExpandingArrayWithOptionalElem` from an `std::vector` of /// the underlying type `T`. The extent of the length is checked against the /// `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. /*implicit*/ ExpandingArrayWithOptionalElem(std::vector<T> vec) : ExpandingArrayWithOptionalElem(c10::ArrayRef<T>(vec)) {} /// Constructs an `ExpandingArrayWithOptionalElem` from an `c10::ArrayRef` of /// the underlying type `T`. The extent of the length is checked against the /// `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. /*implicit*/ ExpandingArrayWithOptionalElem(c10::ArrayRef<T> values) : ExpandingArray<D, std::optional<T>>(0) { // clang-format off TORCH_CHECK( values.size() == D, "Expected ", D, " values, but instead got ", values.size()); // clang-format on for (const auto i : c10::irange(this->values_.size())) { this->values_[i] = values[i]; } } /// Constructs an `ExpandingArrayWithOptionalElem` from a single value of the /// underlying type `T`, which is repeated `D` times (where `D` is the extent /// parameter of the `ExpandingArrayWithOptionalElem`). /*implicit*/ ExpandingArrayWithOptionalElem(T single_size) : ExpandingArray<D, std::optional<T>>(0) { for (const auto i : c10::irange(this->values_.size())) { this->values_[i] = single_size; } } /// Constructs an `ExpandingArrayWithOptionalElem` from a correctly sized /// `std::array` of the underlying type `T`. /*implicit*/ ExpandingArrayWithOptionalElem(const std::array<T, D>& values) : ExpandingArray<D, std::optional<T>>(0) { for (const auto i : c10::irange(this->values_.size())) { this->values_[i] = values[i]; } } }; template <size_t D, typename T> std::ostream& operator<<( std::ostream& stream, const ExpandingArrayWithOptionalElem<D, T>& expanding_array_with_opt_elem) { if (expanding_array_with_opt_elem.size() == 1) { const auto& elem = expanding_array_with_opt_elem->at(0); stream << (elem.has_value() ? c10::str(elem.value()) : "None"); } else { std::vector<std::string> str_array; for (const auto& elem : *expanding_array_with_opt_elem) { str_array.emplace_back( elem.has_value() ? c10::str(elem.value()) : "None"); } stream << c10::ArrayRef<std::string>(str_array); } return stream; } } // namespace torch ```
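A short usage sketch of the expansion behavior; the convolution options at the end are a typical consumer of `ExpandingArray` parameters.

```cpp
#include <torch/torch.h>
#include <cassert>

int main() {
  // A single value expands to D copies; a braced list must match D exactly
  // (enforced at runtime via TORCH_CHECK).
  torch::ExpandingArray<2> kernel(3);      // expands to {3, 3}
  torch::ExpandingArray<2> stride({2, 1}); // stays {2, 1}
  assert((*kernel)[0] == 3 && (*kernel)[1] == 3);
  assert((*stride)[1] == 1);

  // Typical consumer: Conv2dOptions takes ExpandingArray<2> parameters, so a
  // scalar kernel_size of 3 means a 3x3 kernel.
  auto opts = torch::nn::Conv2dOptions(/*in=*/3, /*out=*/8, /*kernel_size=*/3)
                  .stride({2, 1});
  (void)opts;
  return 0;
}
```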
======================================================================================================================================== SOURCE CODE FILE: fft.h LINES: 1 SIZE: 12.20 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\fft.h ENCODING: utf-8
```h
#pragma once

#include <ATen/ATen.h>
#include <torch/types.h>

#include <utility>

namespace torch::fft {

/// Computes the 1 dimensional fast Fourier transform over a given dimension.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.fft.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kComplexDouble);
/// torch::fft::fft(t);
/// ```
inline Tensor fft(
    const Tensor& self,
    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_fft_symint(self, std::move(n), dim, norm);
}

/// Computes the 1 dimensional inverse Fourier transform over a given
/// dimension.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.ifft.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kComplexDouble);
/// torch::fft::ifft(t);
/// ```
inline Tensor ifft(
    const Tensor& self,
    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_ifft_symint(self, std::move(n), dim, norm);
}

/// Computes the 2-dimensional fast Fourier transform over the given
/// dimensions.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.fft2.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kComplexDouble);
/// torch::fft::fft2(t);
/// ```
inline Tensor fft2(
    const Tensor& self,
    OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_fft2(self, s, dim, norm);
}

/// Computes the inverse of torch.fft.fft2.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.ifft2.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kComplexDouble);
/// torch::fft::ifft2(t);
/// ```
inline Tensor ifft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_ifft2(self, s, dim, norm);
}

/// Computes the N dimensional fast Fourier transform over given dimensions.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.fftn.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kComplexDouble);
/// torch::fft::fftn(t);
/// ```
inline Tensor fftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    at::OptionalIntArrayRef dim = std::nullopt,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_fftn(self, s, dim, norm);
}

/// Computes the N dimensional inverse fast Fourier transform over given
/// dimensions.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.ifftn.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kComplexDouble);
/// torch::fft::ifftn(t);
/// ```
inline Tensor ifftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    at::OptionalIntArrayRef dim = std::nullopt,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_ifftn(self, s, dim, norm);
}

/// Computes the 1 dimensional FFT of real input with onesided Hermitian
/// output.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.rfft.
///
/// Example:
/// ```
/// auto t = torch::randn(128);
/// auto T = torch::fft::rfft(t);
/// assert(T.is_complex() && T.numel() == 128 / 2 + 1);
/// ```
inline Tensor rfft(
    const Tensor& self,
    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_rfft_symint(self, std::move(n), dim, norm);
}

/// Computes the inverse of torch.fft.rfft
///
/// The input is a onesided Hermitian Fourier domain signal, with real-valued
/// output. See https://pytorch.org/docs/main/fft.html#torch.fft.irfft
///
/// Example:
/// ```
/// auto T = torch::randn(128 / 2 + 1, torch::kComplexDouble);
/// auto t = torch::fft::irfft(T, /*n=*/128);
/// assert(t.is_floating_point() && t.numel() == 128);
/// ```
inline Tensor irfft(
    const Tensor& self,
    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_irfft_symint(self, std::move(n), dim, norm);
}

/// Computes the 2-dimensional FFT of real input. Returns a onesided Hermitian
/// output. See https://pytorch.org/docs/main/fft.html#torch.fft.rfft2
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kDouble);
/// torch::fft::rfft2(t);
/// ```
inline Tensor rfft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_rfft2(self, s, dim, norm);
}

/// Computes the inverse of torch.fft.rfft2.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.irfft2.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kComplexDouble);
/// torch::fft::irfft2(t);
/// ```
inline Tensor irfft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_irfft2(self, s, dim, norm);
}

/// Computes the N dimensional FFT of real input with onesided Hermitian
/// output.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.rfftn
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kDouble);
/// torch::fft::rfftn(t);
/// ```
inline Tensor rfftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    at::OptionalIntArrayRef dim = std::nullopt,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_rfftn(self, s, dim, norm);
}

/// Computes the inverse of torch.fft.rfftn.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.irfftn.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kComplexDouble);
/// torch::fft::irfftn(t);
/// ```
inline Tensor irfftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    at::OptionalIntArrayRef dim = std::nullopt,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_irfftn(self, s, dim, norm);
}

/// Computes the 1 dimensional FFT of a onesided Hermitian signal
///
/// The input represents a Hermitian symmetric time domain signal. The returned
/// Fourier domain representation of such a signal is real-valued. See
/// https://pytorch.org/docs/main/fft.html#torch.fft.hfft
///
/// Example:
/// ```
/// auto t = torch::randn(128 / 2 + 1, torch::kComplexDouble);
/// auto T = torch::fft::hfft(t, /*n=*/128);
/// assert(T.is_floating_point() && T.numel() == 128);
/// ```
inline Tensor hfft(
    const Tensor& self,
    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_hfft_symint(self, std::move(n), dim, norm);
}

/// Computes the inverse FFT of a real-valued Fourier domain signal.
///
/// The output is a onesided representation of the Hermitian symmetric time
/// domain signal. See https://pytorch.org/docs/main/fft.html#torch.fft.ihfft.
///
/// Example:
/// ```
/// auto T = torch::randn(128, torch::kDouble);
/// auto t = torch::fft::ihfft(T);
/// assert(t.is_complex() && t.numel() == 128 / 2 + 1);
/// ```
inline Tensor ihfft(
    const Tensor& self,
    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_ihfft_symint(self, std::move(n), dim, norm);
}

/// Computes the 2-dimensional FFT of a Hermitian symmetric input signal.
///
/// The input is a onesided representation of the Hermitian symmetric time
/// domain signal. See https://pytorch.org/docs/main/fft.html#torch.fft.hfft2.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 65}, torch::kComplexDouble);
/// auto T = torch::fft::hfft2(t, /*s=*/{128, 128});
/// assert(T.is_floating_point() && T.numel() == 128 * 128);
/// ```
inline Tensor hfft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_hfft2(self, s, dim, norm);
}

/// Computes the 2-dimensional IFFT of a real input signal.
///
/// The output is a onesided representation of the Hermitian symmetric time
/// domain signal. See
/// https://pytorch.org/docs/main/fft.html#torch.fft.ihfft2.
///
/// Example:
/// ```
/// auto T = torch::randn({128, 128}, torch::kDouble);
/// auto t = torch::fft::ihfft2(T);
/// assert(t.is_complex() && t.size(1) == 65);
/// ```
inline Tensor ihfft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_ihfft2(self, s, dim, norm);
}

/// Computes the N-dimensional FFT of a Hermitian symmetric input signal.
///
/// The input is a onesided representation of the Hermitian symmetric time
/// domain signal. See https://pytorch.org/docs/main/fft.html#torch.fft.hfftn.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 65}, torch::kComplexDouble);
/// auto T = torch::fft::hfftn(t, /*s=*/{128, 128});
/// assert(T.is_floating_point() && T.numel() == 128 * 128);
/// ```
inline Tensor hfftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_hfftn(self, s, dim, norm);
}

/// Computes the N-dimensional IFFT of a real input signal.
///
/// The output is a onesided representation of the Hermitian symmetric time
/// domain signal. See
/// https://pytorch.org/docs/main/fft.html#torch.fft.ihfftn.
///
/// Example:
/// ```
/// auto T = torch::randn({128, 128}, torch::kDouble);
/// auto t = torch::fft::ihfftn(T);
/// assert(t.is_complex() && t.size(1) == 65);
/// ```
inline Tensor ihfftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
    std::optional<std::string_view> norm = std::nullopt) {
  return torch::fft_ihfftn(self, s, dim, norm);
}

/// Computes the discrete Fourier Transform sample frequencies for a signal of
/// size n.
///
/// See https://pytorch.org/docs/main/fft.html#torch.fft.fftfreq
///
/// Example:
/// ```
/// auto frequencies = torch::fft::fftfreq(128, torch::kDouble);
/// ```
inline Tensor fftfreq(int64_t n, double d, const TensorOptions& options = {}) {
  return torch::fft_fftfreq(n, d, options);
}

inline Tensor fftfreq(int64_t n, const TensorOptions& options = {}) {
  return torch::fft_fftfreq(n, /*d=*/1.0, options);
}

/// Computes the sample frequencies for torch.fft.rfft with a signal of size n.
///
/// Like torch.fft.rfft, only the positive frequencies are included.
/// See https://pytorch.org/docs/main/fft.html#torch.fft.rfftfreq
///
/// Example:
/// ```
/// auto frequencies = torch::fft::rfftfreq(128, torch::kDouble);
/// ```
inline Tensor rfftfreq(int64_t n, double d, const TensorOptions& options) {
  return torch::fft_rfftfreq(n, d, options);
}

inline Tensor rfftfreq(int64_t n, const TensorOptions& options) {
  return torch::fft_rfftfreq(n, /*d=*/1.0, options);
}

/// Reorders n-dimensional FFT output to have negative frequency terms first,
/// by a torch.roll operation.
///
/// See https://pytorch.org/docs/main/fft.html#torch.fft.fftshift
///
/// Example:
/// ```
/// auto x = torch::randn({127, 4});
/// auto centred_fft = torch::fft::fftshift(torch::fft::fftn(x));
/// ```
inline Tensor fftshift(
    const Tensor& x,
    at::OptionalIntArrayRef dim = std::nullopt) {
  return torch::fft_fftshift(x, dim);
}

/// Inverse of torch.fft.fftshift
///
/// See https://pytorch.org/docs/main/fft.html#torch.fft.ifftshift
///
/// Example:
/// ```
/// auto x = torch::randn({127, 4});
/// auto shift = torch::fft::fftshift(x);
/// auto unshift = torch::fft::ifftshift(shift);
/// assert(torch::allclose(x, unshift));
/// ```
inline Tensor ifftshift(
    const Tensor& x,
    at::OptionalIntArrayRef dim = std::nullopt) {
  return torch::fft_ifftshift(x, dim);
}

} // namespace torch::fft
```
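A round-trip sketch tying `rfft` and `irfft` together; the tolerances are the `allclose` defaults and the length 128 is arbitrary.

```cpp
#include <torch/torch.h>

int main() {
  // rfft keeps only the onesided spectrum (n/2 + 1 complex bins), so irfft
  // needs the original length n to reconstruct the signal unambiguously.
  auto x = torch::randn(128, torch::kDouble);
  auto X = torch::fft::rfft(x);
  TORCH_CHECK(X.numel() == 128 / 2 + 1);
  auto y = torch::fft::irfft(X, /*n=*/128);
  TORCH_CHECK(torch::allclose(x, y));
  return 0;
}
```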
============================================================================================================================================ SOURCE CODE FILE: imethod.h LINES: 1 SIZE: 1.75 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\imethod.h ENCODING: utf-8 ```h #pragma once #include <ATen/core/ivalue.h> #include <vector> namespace torch { class TORCH_API IMethod { /* IMethod provides a portable interface for torch methods, whether they are backed by torchscript or python/deploy. This is helpful since torchscript methods provide additional information (e.g. FunctionSchema, Graph) which aren't available in pure python methods. Higher level APIs should prefer depending on this interface rather than a specific implementation of it, to promote portability and reuse, and avoid unintentional dependencies on e.g. script methods. Note: This API is experimental, and may evolve. */ public: using IValueList = std::vector<c10::IValue>; using IValueMap = std::unordered_map<std::string, at::IValue>; IMethod() = default; IMethod(const IMethod&) = default; IMethod& operator=(const IMethod&) = default; IMethod(IMethod&&) noexcept = default; IMethod& operator=(IMethod&&) noexcept = default; virtual ~IMethod() = default; virtual c10::IValue operator()( std::vector<c10::IValue> args, const IValueMap& kwargs = IValueMap()) const = 0; virtual const std::string& name() const = 0; // Returns an ordered list of argument names, possible in both // script and python methods. This is a more portable dependency // than a ScriptMethod FunctionSchema, which has more information // than can be generally expected from a python method. const std::vector<std::string>& getArgumentNames() const; protected: virtual void setArgumentNames( std::vector<std::string>& argumentNames) const = 0; private: mutable bool isArgumentNamesInitialized_{false}; mutable std::vector<std::string> argumentNames_; }; } // namespace torch ```
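Since `IMethod` is abstract, a minimal implementation sketch may help; `EchoMethod`, its behavior, and its argument names are purely illustrative, not part of the library.

```cpp
#include <torch/imethod.h>

#include <string>
#include <vector>

// Hypothetical IMethod subclass: returns its first argument unchanged.
class EchoMethod : public torch::IMethod {
 public:
  c10::IValue operator()(
      std::vector<c10::IValue> args,
      const IValueMap& kwargs = IValueMap()) const override {
    return args.empty() ? c10::IValue() : args.front();
  }

  const std::string& name() const override {
    return name_;
  }

 protected:
  // Called once, lazily, the first time getArgumentNames() is invoked.
  void setArgumentNames(
      std::vector<std::string>& argumentNames) const override {
    argumentNames = {"input"};
  }

 private:
  std::string name_ = "echo";
};
```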
======================================================================================================================================== SOURCE CODE FILE: jit.h LINES: 1 SIZE: 0.90 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\jit.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <torch/csrc/jit/api/module.h> #include <memory> #include <string> namespace torch::jit { /// Compiles script code into an executable graph. /// /// Takes a string containing functions in script syntax and compiles them into /// a module (graph). The returned module provides a `run_method` function /// that may be used to invoke the compiled functions. /// /// For example: /// \rst /// .. code-block:: cpp /// /// auto module = torch::jit::compile(R"JIT( /// def relu_script(a, b): /// return torch.relu(a + b) /// def test_while(a, i): /// while i < 10: /// a += a /// i += 1 /// return a /// )JIT"); /// IValue output = module->run_method("relu_script", a, b); /// \endrst TORCH_API std::shared_ptr<CompilationUnit> compile(const std::string& source); } // namespace torch::jit ```
======================================================================================================================================== SOURCE CODE FILE: mps.h LINES: 1 SIZE: 1.21 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\mps.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <cstddef> #include <cstdint> #ifdef __OBJC__ #include <Foundation/Foundation.h> #include <Metal/Metal.h> using MTLCommandBuffer_t = id<MTLCommandBuffer>; using DispatchQueue_t = dispatch_queue_t; #else using MTLCommandBuffer_t = void*; using DispatchQueue_t = void*; #endif namespace torch::mps { /// Returns true if MPS device is available. bool TORCH_API is_available(); /// Sets the RNG seed for the MPS device. void TORCH_API manual_seed(uint64_t seed); /// Waits for all streams on the MPS device to complete. /// This blocks the calling CPU thread by using the 'waitUntilCompleted()' /// method to wait for Metal command buffers finish executing all the /// encoded GPU operations before returning. void TORCH_API synchronize(); /// Submits the currently active command buffer to run on the MPS device. void TORCH_API commit(); /// Get the current command buffer to encode the Metal commands. MTLCommandBuffer_t TORCH_API get_command_buffer(); /// Get the dispatch_queue_t to synchronize encoding the custom kernels /// with the PyTorch MPS backend. DispatchQueue_t TORCH_API get_dispatch_queue(); } // namespace torch::mps ```
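A guarded usage sketch, assuming a LibTorch build; on builds without the MPS backend, `is_available()` returns false and the block is skipped.

```cpp
#include <torch/torch.h>

int main() {
  if (torch::mps::is_available()) {
    torch::mps::manual_seed(42);
    auto t = torch::randn({2, 2}, torch::kMPS);
    // Block until all queued Metal command buffers have finished executing.
    torch::mps::synchronize();
  }
  return 0;
}
```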
=========================================================================================================================================== SOURCE CODE FILE: nested.h LINES: 1 SIZE: 2.80 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nested.h ENCODING: utf-8
```h
#pragma once

#include <ATen/ATen.h>
#include <ATen/core/ATen_fwd.h>
#include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>

#include <algorithm>

namespace torch::nested {

/// Nested tensor
///
/// See
/// https://pytorch.org/docs/main/nested.html#torch.nested.nested_tensor
// implemented on python object to allow torch.nested.nested_tensor to be
// constructed with arbitrarily nested python objects - for now, only arbitrary
// python lists and lists of Tensors
// See torch/csrc/autograd/python_nested_functions_manual.cpp for Python
// implementation
// See here for C++ implementation
inline at::Tensor nested_tensor(
    at::TensorList nested_tensor_data,
    const at::TensorOptions& options = {}) {
  auto out = at::_nested_tensor_from_tensor_list(
      nested_tensor_data,
      c10::typeMetaToScalarType(options.dtype()),
      std::nullopt,
      options.device(),
      options.pinned_memory());
  if (options.has_requires_grad() && options.requires_grad()) {
    out.requires_grad_(true);
  }
  return out;
}

inline at::Tensor nested_tensor(
    at::ArrayRef<detail::TensorDataContainer> nested_tensor_data,
    const at::TensorOptions& options = {}) {
  for (const auto& tdc : nested_tensor_data) {
    TORCH_CHECK(
        tdc.is_init_list(),
        "nested_tensor() not implemented for these parameters");
  }
  // Construct a TensorList using nested_tensor_data
  std::vector<at::Tensor> tensor_list(nested_tensor_data.size());
  std::transform(
      nested_tensor_data.begin(),
      nested_tensor_data.end(),
      tensor_list.begin(),
      [&](const detail::TensorDataContainer& tdc) {
        return tdc.convert_to_tensor(options);
      });
  auto out = at::_nested_tensor_from_tensor_list(
      tensor_list,
      c10::typeMetaToScalarType(options.dtype()),
      std::nullopt,
      options.device(),
      options.pinned_memory());
  if (options.has_requires_grad() && options.requires_grad()) {
    out.requires_grad_(true);
  }
  return out;
}

/// As Nested Tensor
///
/// See
/// https://pytorch.org/docs/main/nested.html#torch.nested.as_nested_tensor
inline at::Tensor as_nested_tensor(
    at::TensorList list,
    std::optional<at::ScalarType> dtype = std::nullopt,
    std::optional<at::Device> device = std::nullopt) {
  return at::_nested_tensor_from_tensor_list(
      list, dtype, std::nullopt, device, std::nullopt);
}

/// Nested to padded tensor
///
/// See
/// https://pytorch.org/docs/main/nested.html#torch.nested.to_padded_tensor
inline at::Tensor to_padded_tensor(
    const at::Tensor& self,
    double padding,
    at::OptionalIntArrayRef output_size = std::nullopt) {
  return at::nested_to_padded_tensor(self, padding, output_size);
}

} // namespace torch::nested
```
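A minimal usage sketch for the `TensorList` overloads above; the component lengths (3 and 5) are arbitrary.

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Two components of different lengths form one nested tensor, which can
  // then be densified by right-padding every component to the largest size.
  auto a = torch::arange(3, torch::kFloat);
  auto b = torch::arange(5, torch::kFloat);
  auto nt = torch::nested::nested_tensor({a, b});
  auto padded = torch::nested::to_padded_tensor(nt, /*padding=*/0.0);
  std::cout << padded.sizes() << '\n'; // [2, 5]
  return 0;
}
```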
======================================================================================================================================= SOURCE CODE FILE: nn.h LINES: 1 SIZE: 0.25 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/cloneable.h> #include <torch/nn/functional.h> #include <torch/nn/init.h> #include <torch/nn/module.h> #include <torch/nn/modules.h> #include <torch/nn/options.h> #include <torch/nn/pimpl.h> #include <torch/nn/utils.h> ```
================================================================================================================================================= SOURCE CODE FILE: cloneable.h LINES: 1 SIZE: 3.90 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\cloneable.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/module.h> #include <torch/types.h> #include <torch/utils.h> #include <c10/core/TensorOptions.h> #include <c10/util/Exception.h> #include <memory> #include <utility> namespace torch::nn { /// The `clone()` method in the base `Module` class does not have knowledge of /// the concrete runtime type of its subclasses. Therefore, `clone()` must /// either be called from within the subclass, or from a base class that has /// knowledge of the concrete type. `Cloneable` uses the CRTP to gain /// knowledge of the subclass' static type and provide an implementation of the /// `clone()` method. We do not want to use this pattern in the base class, /// because then storing a module would always require templatizing it. template <typename Derived> // NOLINTNEXTLINE(bugprone-exception-escape) class Cloneable : public Module { public: using Module::Module; /// `reset()` must perform initialization of all members with reference /// semantics, most importantly parameters, buffers and submodules. virtual void reset() = 0; /// Performs a recursive "deep copy" of the `Module`, such that all parameters /// and submodules in the cloned module are different from those in the /// original module. std::shared_ptr<Module> clone( const std::optional<Device>& device = std::nullopt) const override { NoGradGuard no_grad; const auto& self = static_cast<const Derived&>(*this); auto copy = std::make_shared<Derived>(self); copy->parameters_.clear(); copy->buffers_.clear(); copy->children_.clear(); copy->reset(); TORCH_CHECK( copy->parameters_.size() == parameters_.size(), "The cloned module does not have the same number of " "parameters as the original module after calling reset(). " "Are you sure you called register_parameter() inside reset() " "and not the constructor?"); for (const auto& parameter : named_parameters(/*recurse=*/false)) { auto& tensor = *parameter; auto data = device && tensor.device() != *device ? tensor.to(*device) : tensor.clone(); copy->parameters_[parameter.key()].set_data(data); } TORCH_CHECK( copy->buffers_.size() == buffers_.size(), "The cloned module does not have the same number of " "buffers as the original module after calling reset(). " "Are you sure you called register_buffer() inside reset() " "and not the constructor?"); for (const auto& buffer : named_buffers(/*recurse=*/false)) { auto& tensor = *buffer; auto data = device && tensor.device() != *device ? tensor.to(*device) : tensor.clone(); copy->buffers_[buffer.key()].set_data(data); } TORCH_CHECK( copy->children_.size() == children_.size(), "The cloned module does not have the same number of " "child modules as the original module after calling reset(). " "Are you sure you called register_module() inside reset() " "and not the constructor?"); for (const auto& child : children_) { copy->children_[child.key()]->clone_(*child.value(), device); } return copy; } private: void clone_(Module& other, const std::optional<Device>& device) final { // Here we are *pretty* certain that `other's` type is `Derived` (because it // was registered under the same name as `this`), but you never know what // crazy things `reset()` does, so `dynamic_cast` just to be safe. 
auto clone = std::dynamic_pointer_cast<Derived>(other.clone(device)); TORCH_CHECK( clone != nullptr, "Attempted to clone submodule, but it is of a " "different type than the submodule it was to be cloned into"); static_cast<Derived&>(*this) = *clone; } }; } // namespace torch::nn ```
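The CRTP contract above is easiest to see in a tiny module. A minimal sketch (module and parameter names are illustrative); note that all registration happens in `reset()`, exactly as `clone()` requires.

```cpp
#include <torch/torch.h>

#include <memory>

struct MyModule : torch::nn::Cloneable<MyModule> {
  MyModule() {
    reset();
  }

  // All parameters/buffers/submodules must be (re)registered here, not in the
  // constructor body, so that clone() can rebuild them on the copy.
  void reset() override {
    weight = register_parameter("weight", torch::randn({3, 3}));
  }

  torch::Tensor weight;
};

int main() {
  MyModule m;
  auto copy = std::dynamic_pointer_cast<MyModule>(m.clone());
  // Deep copy: same values, distinct storage.
  TORCH_CHECK(copy->weight.equal(m.weight));
  TORCH_CHECK(copy->weight.data_ptr() != m.weight.data_ptr());
  return 0;
}
```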
================================================================================================================================================== SOURCE CODE FILE: functional.h LINES: 1 SIZE: 0.64 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/functional/batchnorm.h> #include <torch/nn/functional/conv.h> #include <torch/nn/functional/distance.h> #include <torch/nn/functional/dropout.h> #include <torch/nn/functional/embedding.h> #include <torch/nn/functional/fold.h> #include <torch/nn/functional/instancenorm.h> #include <torch/nn/functional/linear.h> #include <torch/nn/functional/loss.h> #include <torch/nn/functional/normalization.h> #include <torch/nn/functional/padding.h> #include <torch/nn/functional/pixelshuffle.h> #include <torch/nn/functional/pooling.h> #include <torch/nn/functional/upsampling.h> #include <torch/nn/functional/vision.h> ```
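This umbrella header is what the examples in the functional headers below assume when they write `namespace F = torch::nn::functional;`. A two-call sketch of that pattern (tensor shapes are arbitrary):

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({2, 5});
  auto y = F::relu(x);                                      // activation.h
  auto p = F::softmax(x, F::SoftmaxFuncOptions(/*dim=*/1)); // activation.h
  (void)y;
  (void)p;
  return 0;
}
```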
============================================================================================================================================================= SOURCE CODE FILE: activation.h LINES: 1 SIZE: 30.01 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\activation.h ENCODING: utf-8 ```h #pragma once #include <ATen/Dispatch.h> #include <torch/nn/functional/dropout.h> #include <torch/nn/functional/linear.h> #include <torch/nn/options/activation.h> #include <torch/nn/options/dropout.h> #include <torch/nn/options/linear.h> #include <torch/types.h> #include <limits> #include <utility> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor elu(Tensor input, double alpha, bool inplace) { if (inplace) { return torch::elu_(input, alpha); } else { return torch::elu(input, alpha); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.elu /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::ELUFuncOptions` class to /// learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true)); /// ``` inline Tensor elu(Tensor input, const ELUFuncOptions& options = {}) { return detail::elu(std::move(input), options.alpha(), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor selu(Tensor input, bool inplace) { if (inplace) { return torch::selu_(input); } else { return torch::selu(input); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.selu /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::SELUFuncOptions` class to /// learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::selu(input, F::SELUFuncOptions(false)); /// ``` inline Tensor selu(Tensor input, const SELUFuncOptions& options = {}) { return detail::selu(std::move(input), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor hardshrink(const Tensor& input, double lambda) { return torch::hardshrink(input, lambda); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.hardshrink /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::HardshrinkFuncOptions` /// class to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42)); /// ``` inline Tensor hardshrink( const Tensor& input, const HardshrinkFuncOptions& options = {}) { return detail::hardshrink(input, options.lambda()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor hardtanh( Tensor input, double min_val, double max_val, bool inplace) { if (inplace) { return torch::hardtanh_(input, min_val, max_val); } else { return torch::hardtanh(input, min_val, max_val); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.hardtanh /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::HardtanhFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::hardtanh(x, /// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true)); /// ``` inline Tensor hardtanh(Tensor input, const HardtanhFuncOptions& options = {}) { return detail::hardtanh( std::move(input), options.min_val(), options.max_val(), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor leaky_relu(Tensor input, double negative_slope, bool inplace) { if (inplace) { return torch::leaky_relu_(input, negative_slope); } else { return torch::leaky_relu(input, negative_slope); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.leaky_relu /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::LeakyReLUFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::leaky_relu(x, /// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true)); /// ``` inline Tensor leaky_relu( Tensor input, const LeakyReLUFuncOptions& options = {}) { return detail::leaky_relu( std::move(input), options.negative_slope(), options.inplace()); } // ============================================================================ inline Tensor logsigmoid(const Tensor& input) { return torch::log_sigmoid(input); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor gumbel_softmax( const Tensor& logits, double tau, bool hard, int dim) { auto gumbels = -torch::empty_like(logits).exponential_().log(); // ~Gumbel(0,1) gumbels = (logits + gumbels) / tau; // ~Gumbel(logits, tau) auto y_soft = gumbels.softmax(dim); torch::Tensor ret; if (hard) { // Straight through. auto index = std::get<1>(y_soft.max(dim, /*keepdim=*/true)); auto y_hard = torch::zeros_like(logits).scatter_(dim, index, 1.0); ret = y_hard - y_soft.detach() + y_soft; } else { ret = y_soft; } return ret; } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.gumbel_softmax /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::GumbelSoftmaxFuncOptions` /// class to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1)); /// ``` inline Tensor gumbel_softmax( const Tensor& logits, const GumbelSoftmaxFuncOptions& options = {}) { return detail::gumbel_softmax( logits, options.tau(), options.hard(), options.dim()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor softmax( const Tensor& input, int64_t dim, std::optional<torch::Dtype> dtype) { Tensor ret; if (dtype == std::nullopt) { ret = input.softmax(dim); } else { ret = input.softmax(dim, dtype); } return ret; } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.softmax /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::SoftmaxFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::softmax(input, F::SoftmaxFuncOptions(1)); /// ``` inline Tensor softmax(const Tensor& input, const SoftmaxFuncOptions& options) { return detail::softmax(input, options.dim(), options.dtype()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor softmin( const Tensor& input, int64_t dim, std::optional<torch::Dtype> dtype) { Tensor ret; if (dtype == std::nullopt) { ret = (-input).softmax(dim); } else { ret = (-input).softmax(dim, dtype); } return ret; } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.softmin /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::SoftminFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::softmin(input, F::SoftminFuncOptions(1)); /// ``` inline Tensor softmin(const Tensor& input, const SoftminFuncOptions& options) { return detail::softmin(input, options.dim(), options.dtype()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor log_softmax( const Tensor& input, int64_t dim, std::optional<torch::Dtype> dtype) { Tensor ret; if (dtype == std::nullopt) { ret = input.log_softmax(dim); } else { ret = input.log_softmax(dim, dtype); } return ret; } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.log_softmax /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::LogSoftmaxFuncOptions` /// class to learn what optional arguments are supported for this functional. 
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::log_softmax(input, F::LogSoftmaxFuncOptions(1));
/// ```
inline Tensor log_softmax(
    const Tensor& input,
    const LogSoftmaxFuncOptions& options) {
  return detail::log_softmax(input, options.dim(), options.dtype());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor glu(const Tensor& input, int64_t dim) {
  TORCH_CHECK(
      input.dim() != 0,
      "glu does not support scalars because halving size must be even");
  return torch::glu(input, dim);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.glu
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::GLUFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::glu(input, F::GLUFuncOptions(1));
/// ```
inline Tensor glu(const Tensor& input, const GLUFuncOptions& options = {}) {
  return detail::glu(input, options.dim());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor gelu(const Tensor& input, const std::string& approximate) {
  return torch::gelu(input, approximate);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

inline Tensor gelu(const Tensor& input, const GELUFuncOptions& options = {}) {
  return detail::gelu(input, options.approximate());
}

// ============================================================================

inline Tensor silu(const Tensor& input) {
  return torch::silu(input);
}

// ============================================================================

inline Tensor mish(const Tensor& input) {
  return torch::mish(input);
}

// ============================================================================

inline Tensor prelu(const Tensor& input, const Tensor& weight) {
  return torch::prelu(input, weight);
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor relu(Tensor input, bool inplace) {
  if (inplace) {
    return torch::relu_(input);
  } else {
    return torch::relu(input);
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.relu
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::ReLUFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::relu(x, F::ReLUFuncOptions().inplace(true));
/// ```
inline Tensor relu(Tensor input, const ReLUFuncOptions& options = {}) {
  return detail::relu(std::move(input), options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor relu6(Tensor input, bool inplace) {
  if (inplace) {
    return torch::relu6_(input);
  } else {
    return torch::relu6(input);
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.relu6
/// about the exact behavior of this functional.
/// /// See the documentation for `torch::nn::functional::ReLU6FuncOptions` class to /// learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::relu6(x, F::ReLU6FuncOptions().inplace(true)); /// ``` inline Tensor relu6(Tensor input, const ReLU6FuncOptions& options = {}) { return detail::relu6(std::move(input), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor rrelu( Tensor input, double lower, double upper, bool training, bool inplace) { if (inplace) { return torch::rrelu_(input, lower, upper, training); } else { return torch::rrelu(input, lower, upper, training); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.rrelu /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::RReLUFuncOptions` class to /// learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true)); /// ``` inline Tensor rrelu(Tensor input, const RReLUFuncOptions& options = {}) { return detail::rrelu( std::move(input), options.lower(), options.upper(), options.training(), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor celu(Tensor input, double alpha, bool inplace) { if (inplace) { return torch::celu_(input, alpha); } else { return torch::celu(input, alpha); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.celu /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::CELUFuncOptions` class to /// learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true)); /// ``` inline Tensor celu(Tensor input, const CELUFuncOptions& options = {}) { return detail::celu(std::move(input), options.alpha(), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor softplus(const Tensor& input, double beta, double threshold) { return torch::softplus(input, beta, threshold); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.softplus /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::SoftplusFuncOptions` class /// to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0)); /// ``` inline Tensor softplus( const Tensor& input, const SoftplusFuncOptions& options = {}) { return detail::softplus(input, options.beta(), options.threshold()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor softshrink(const Tensor& input, double lambda) { return torch::softshrink(input, lambda); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.softshrink /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::SoftshrinkFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::softshrink(x, F::SoftshrinkFuncOptions(0.42)); /// ``` inline Tensor softshrink( const Tensor& input, const SoftshrinkFuncOptions& options = {}) { return detail::softshrink(input, options.lambda()); } // ============================================================================ inline Tensor softsign(const Tensor& input) { return input / (input.abs() + 1); } // ============================================================================ inline Tensor tanhshrink(const Tensor& input) { return input - input.tanh(); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor threshold( Tensor input, double threshold, double value, bool inplace) { if (inplace) { return torch::threshold_(input, threshold, value); } else { return torch::threshold(input, threshold, value); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.threshold /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::ThresholdFuncOptions` /// class to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true)); /// ``` inline Tensor threshold(Tensor input, const ThresholdFuncOptions& options) { return detail::threshold( std::move(input), options.threshold(), options.value(), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline std::tuple<Tensor, Tensor> multi_head_attention_forward( const Tensor& query, const Tensor& key, const Tensor& value, int64_t embed_dim_to_check, int64_t num_heads, const Tensor& in_proj_weight, const Tensor& in_proj_bias, const Tensor& bias_k, const Tensor& bias_v, bool add_zero_attn, double dropout_p, const Tensor& out_proj_weight, const Tensor& out_proj_bias, bool training = true, const Tensor& key_padding_mask = {}, bool need_weights = true, const Tensor& attn_mask = {}, bool use_separate_proj_weight = false, const Tensor& q_proj_weight = {}, const Tensor& k_proj_weight = {}, const Tensor& v_proj_weight = {}, const Tensor& static_k = {}, const Tensor& static_v = {}, bool average_attn_weights = true) { namespace F = torch::nn::functional; const auto query_sizes = query.sizes(); const auto& tgt_len = query_sizes[0]; const auto& bsz = query_sizes[1]; const auto& embed_dim = query_sizes[2]; TORCH_INTERNAL_ASSERT(embed_dim == embed_dim_to_check); TORCH_INTERNAL_ASSERT(key.sizes() == value.sizes()); const auto head_dim = embed_dim / num_heads; TORCH_CHECK( head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"); const auto scaling = 1 / std::sqrt(head_dim); Tensor q, k, v; if (!use_separate_proj_weight) { if (torch::equal(query, key) && torch::equal(key, value)) { // self-attention const auto chunks = F::linear(query, in_proj_weight, in_proj_bias).chunk(3, /*dim=*/-1); q = chunks[0]; k = chunks[1]; v = chunks[2]; } else if (torch::equal(key, value)) { // encoder-decoder attention // This is inline in_proj function with in_proj_weight and in_proj_bias auto _b = in_proj_bias; int64_t _start = 0; auto _end = embed_dim; auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end); if (_b.defined()) { _b = _b.slice(/*dim=*/0, _start, _end); } q = F::linear(query, _w, _b); if (!key.defined()) { TORCH_INTERNAL_ASSERT(!value.defined()); k.reset(); v.reset(); } else { // This is inline in_proj function with in_proj_weight and in_proj_bias _b = in_proj_bias; _start = embed_dim; _w = in_proj_weight.slice(/*dim=*/0, _start); if (_b.defined()) { _b = _b.slice(/*dim=*/0, _start); } const auto chunks = F::linear(key, _w, _b).chunk(2, /*dim=*/-1); k = chunks[0]; v = chunks[1]; } } else { // This is inline in_proj function with in_proj_weight and in_proj_bias auto _b = in_proj_bias; int64_t _start = 0; auto _end = embed_dim; auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end); if (_b.defined()) { _b = _b.slice(/*dim=*/0, _start, _end); } q = F::linear(query, _w, _b); // This is inline in_proj function with in_proj_weight and in_proj_bias _b = in_proj_bias; _start = embed_dim; _end = embed_dim * 2; _w = in_proj_weight.slice(/*dim=*/0, _start, _end); if (_b.defined()) { _b = _b.slice(/*dim=*/0, _start, _end); } k = F::linear(key, _w, _b); // This is inline in_proj function with in_proj_weight and in_proj_bias _b = in_proj_bias; _start = embed_dim * 2; _w = in_proj_weight.slice(/*dim=*/0, _start); if (_b.defined()) { _b = _b.slice(0, _start); } v = F::linear(value, _w, _b); } } else { const auto& q_proj_weight_non_opt = q_proj_weight; 
{ const auto sizes = q_proj_weight_non_opt.sizes(); const auto len1 = sizes[0]; const auto len2 = sizes[1]; TORCH_CHECK(len1 == embed_dim && len2 == query.size(-1)); } const auto& k_proj_weight_non_opt = k_proj_weight; { const auto sizes = k_proj_weight_non_opt.sizes(); const auto len1 = sizes[0]; const auto len2 = sizes[1]; TORCH_CHECK(len1 == embed_dim && len2 == key.size(-1)); } const auto& v_proj_weight_non_opt = v_proj_weight; { const auto sizes = v_proj_weight_non_opt.sizes(); const auto len1 = sizes[0]; const auto len2 = sizes[1]; TORCH_CHECK(len1 == embed_dim && len2 == value.size(-1)); } if (in_proj_bias.defined()) { q = F::linear( query, q_proj_weight_non_opt, in_proj_bias.slice(/*dim=*/0, 0, embed_dim)); k = F::linear( key, k_proj_weight_non_opt, in_proj_bias.slice(/*dim=*/0, embed_dim, (embed_dim * 2))); v = F::linear( value, v_proj_weight_non_opt, in_proj_bias.slice(/*dim=*/0, (embed_dim * 2))); } else { q = F::linear(query, q_proj_weight_non_opt, in_proj_bias); k = F::linear(key, k_proj_weight_non_opt, in_proj_bias); v = F::linear(value, v_proj_weight_non_opt, in_proj_bias); } } q = q * scaling; Tensor attn_mask_ = attn_mask; Tensor key_padding_mask_ = key_padding_mask; if (bias_k.defined() && bias_v.defined()) { if (!static_k.defined() && !static_v.defined()) { k = torch::cat({k, bias_k.repeat({1, bsz, 1})}); v = torch::cat({v, bias_v.repeat({1, bsz, 1})}); if (attn_mask_.defined()) { attn_mask_ = torch::cat( {attn_mask_, torch::zeros( {attn_mask_.size(0), 1}, at::TensorOptions(attn_mask_.dtype()) .device(attn_mask_.device()))}, /*dim=*/1); } if (key_padding_mask_.defined()) { key_padding_mask_ = torch::cat( {key_padding_mask_, torch::zeros( {key_padding_mask_.size(0), 1}, at::TensorOptions(key_padding_mask_.dtype()) .device(key_padding_mask_.device()))}, /*dim=*/1); } } else { TORCH_CHECK(!static_k.defined(), "bias cannot be added to static key."); TORCH_CHECK(!static_v.defined(), "bias cannot be added to static value."); } } else { TORCH_CHECK(!bias_k.defined()); TORCH_CHECK(!bias_v.defined()); } q = q.contiguous().view({tgt_len, bsz * num_heads, head_dim}).transpose(0, 1); if (k.defined()) { k = k.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1); } if (v.defined()) { v = v.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1); } if (static_k.defined()) { TORCH_CHECK(static_k.size(0) == bsz * num_heads); TORCH_CHECK(static_k.size(2) == head_dim); k = static_k; } if (static_v.defined()) { TORCH_CHECK(static_v.size(0) == bsz * num_heads); TORCH_CHECK(static_v.size(2) == head_dim); v = static_v; } auto src_len = k.size(1); if (key_padding_mask_.defined()) { TORCH_CHECK(key_padding_mask_.size(0) == bsz); TORCH_CHECK(key_padding_mask_.size(1) == src_len); } if (add_zero_attn) { src_len += 1; auto k_sizes = k.sizes().vec(); k_sizes[1] = 1; k = torch::cat( {k, torch::zeros( k_sizes, at::TensorOptions(k.dtype()).device(k.device()))}, /*dim=*/1); auto v_sizes = v.sizes().vec(); v_sizes[1] = 1; v = torch::cat( {v, torch::zeros( v_sizes, at::TensorOptions(v.dtype()).device(v.device()))}, /*dim=*/1); if (attn_mask_.defined()) { attn_mask_ = torch::cat( {attn_mask_, torch::zeros( {attn_mask_.size(0), 1}, at::TensorOptions(attn_mask_.dtype()) .device(attn_mask_.device()))}, /*dim=*/1); } if (key_padding_mask_.defined()) { key_padding_mask_ = torch::cat( {key_padding_mask_, torch::zeros( {key_padding_mask_.size(0), 1}, at::TensorOptions(key_padding_mask_.dtype()) .device(key_padding_mask_.device()))}, /*dim=*/1); } } auto attn_output_weights = torch::bmm(q, 
k.transpose(1, 2)); TORCH_CHECK( attn_output_weights.sizes() == IntArrayRef({bsz * num_heads, tgt_len, src_len})); if (attn_mask_.defined()) { attn_mask_ = attn_mask_.unsqueeze(0); attn_output_weights += attn_mask_; } if (key_padding_mask_.defined()) { attn_output_weights = attn_output_weights.view({bsz, num_heads, tgt_len, src_len}); attn_output_weights = AT_DISPATCH_FLOATING_TYPES( attn_output_weights.scalar_type(), "attn_output_weights.masked_fill", [&]() { return attn_output_weights.masked_fill( key_padding_mask_.unsqueeze(1).unsqueeze(2), -std::numeric_limits<scalar_t>::infinity()); }); attn_output_weights = attn_output_weights.view({bsz * num_heads, tgt_len, src_len}); } attn_output_weights = F::softmax(attn_output_weights, /*options=*/-1); attn_output_weights = F::dropout( attn_output_weights, F::DropoutFuncOptions().p(dropout_p).training(training)); auto attn_output = torch::bmm(attn_output_weights, v); TORCH_CHECK( attn_output.sizes() == IntArrayRef({bsz * num_heads, tgt_len, head_dim})); attn_output = attn_output.transpose(0, 1).contiguous().view({tgt_len, bsz, embed_dim}); attn_output = F::linear(attn_output, out_proj_weight, out_proj_bias); if (need_weights) { attn_output_weights = attn_output_weights.view({bsz, num_heads, tgt_len, src_len}); if (average_attn_weights) { // average attention weights over heads attn_output_weights = attn_output_weights.sum(/*dim=*/1) / num_heads; } return std::make_tuple(attn_output, attn_output_weights); } else { return std::make_tuple(attn_output, Tensor()); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ inline std::tuple<Tensor, Tensor> multi_head_attention_forward( const Tensor& query, const Tensor& key, const Tensor& value, const MultiheadAttentionForwardFuncOptions& options) { return detail::multi_head_attention_forward( query, key, value, options.embed_dim_to_check(), options.num_heads(), options.in_proj_weight(), options.in_proj_bias(), options.bias_k(), options.bias_v(), options.add_zero_attn(), options.dropout_p(), options.out_proj_weight(), options.out_proj_bias(), options.training(), options.key_padding_mask(), options.need_weights(), options.attn_mask(), options.use_separate_proj_weight(), options.q_proj_weight(), options.k_proj_weight(), options.v_proj_weight(), options.static_k(), options.static_v(), options.average_attn_weights()); } } // namespace torch::nn::functional ```
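The activation functionals above all follow one pattern: a thin public wrapper unpacks an options struct and forwards to a `detail::` worker. Below is a minimal usage sketch, not part of the header itself; it assumes a standard libtorch setup (`torch/torch.h` available and linked), and all tensor shapes and option values are illustrative.

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({2, 5});

  // Out-of-place calls (the default) leave x untouched and return a new tensor.
  auto a = F::relu6(x, F::ReLU6FuncOptions().inplace(false));
  auto b = F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4));
  auto c = F::celu(x, F::CELUFuncOptions().alpha(0.42));
  auto d = F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));

  // threshold() passes its two required arguments through the options struct.
  auto e = F::threshold(x, F::ThresholdFuncOptions(/*threshold=*/0.5, /*value=*/0.0));

  std::cout << a.sizes() << " " << e.min().item<float>() << "\n";
}
```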
============================================================================================================================================================ SOURCE CODE FILE: batchnorm.h LINES: 1 SIZE: 2.04 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\batchnorm.h ENCODING: utf-8 ```h #pragma once #include <c10/util/irange.h> #include <torch/nn/options/batchnorm.h> #include <torch/types.h> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor batch_norm( const Tensor& input, const Tensor& running_mean, const Tensor& running_var, Tensor weight, Tensor bias, bool training, double momentum, double eps) { TORCH_CHECK( input.dim() >= 2, "Expected at least 2 input dimensions, but got ", input.dim()); if (training) { auto size = input.sizes(); int64_t size_prods = size[0]; for (const auto i : c10::irange(size.size() - 2)) { size_prods *= size[i + 2]; } TORCH_CHECK( size_prods != 1, "Expected more than 1 value per channel when training, got input size ", size); } return torch::batch_norm( input, weight, bias, running_mean, running_var, training, momentum, eps, at::globalContext().userEnabledCuDNN()); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.batch_norm /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::BatchNormFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::batch_norm(input, mean, variance, /// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false)); /// ``` inline Tensor batch_norm( const Tensor& input, const Tensor& running_mean, const Tensor& running_var, const BatchNormFuncOptions& options = {}) { return detail::batch_norm( input, running_mean, running_var, options.weight(), options.bias(), options.training(), options.momentum(), options.eps()); } } // namespace torch::nn::functional ```
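As the size check in `detail::batch_norm` above implies, training-mode batch norm needs more than one value per channel, while inference with precomputed running statistics has no such restriction. A minimal sketch of the inference path follows, under the same assumptions as before (libtorch linked; shapes illustrative):

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  // (N, C, H, W) input with C = 3 channels.
  auto input = torch::randn({4, 3, 8, 8});
  auto mean = torch::zeros({3});     // running mean, one value per channel
  auto variance = torch::ones({3});  // running variance
  auto weight = torch::ones({3});    // learnable scale (gamma)
  auto bias = torch::zeros({3});     // learnable shift (beta)

  // training(false): normalize with the supplied running statistics.
  auto out = F::batch_norm(
      input, mean, variance,
      F::BatchNormFuncOptions().weight(weight).bias(bias).eps(1e-5).training(false));
  TORCH_CHECK(out.sizes() == input.sizes());
}
```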
======================================================================================================================================================= SOURCE CODE FILE: conv.h LINES: 1 SIZE: 8.20 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\conv.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/options/conv.h> #include <torch/types.h> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline std::string padding_unwrap(enumtype::kValid) { return "valid"; } inline std::string padding_unwrap(enumtype::kSame) { return "same"; } template <size_t D> IntArrayRef padding_unwrap(const ExpandingArray<D>& array) { return array; } inline Tensor conv1d( const Tensor& input, const Tensor& weight, const Tensor& bias, ExpandingArray<1> stride, const Conv1dFuncOptions::padding_t& padding, ExpandingArray<1> dilation, int64_t groups) { return std::visit( [&](const auto& pad) { return torch::conv1d( input, weight, bias, stride, padding_unwrap(pad), dilation, groups); }, padding); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.conv1d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::Conv1dFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1)); /// ``` inline Tensor conv1d( const Tensor& input, const Tensor& weight, const Conv1dFuncOptions& options = {}) { return detail::conv1d( input, weight, options.bias(), options.stride(), options.padding(), options.dilation(), options.groups()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor conv2d( const Tensor& input, const Tensor& weight, const Tensor& bias, ExpandingArray<2> stride, const Conv2dFuncOptions::padding_t& padding, ExpandingArray<2> dilation, int64_t groups) { return std::visit( [&](const auto& pad) { return torch::conv2d( input, weight, bias, stride, padding_unwrap(pad), dilation, groups); }, padding); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.conv2d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::Conv2dFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1)); /// ``` inline Tensor conv2d( const Tensor& input, const Tensor& weight, const Conv2dFuncOptions& options = {}) { return detail::conv2d( input, weight, options.bias(), options.stride(), options.padding(), options.dilation(), options.groups()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor conv3d( const Tensor& input, const Tensor& weight, const Tensor& bias, ExpandingArray<3> stride, const Conv3dFuncOptions::padding_t& padding, ExpandingArray<3> dilation, int64_t groups) { return std::visit( [&](const auto& pad) { return torch::conv3d( input, weight, bias, stride, padding_unwrap(pad), dilation, groups); }, padding); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.conv3d /// about the exact behavior of this functional. 
/// /// See the documentation for `torch::nn::functional::Conv3dFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1)); /// ``` inline Tensor conv3d( const Tensor& input, const Tensor& weight, const Conv3dFuncOptions& options = {}) { return detail::conv3d( input, weight, options.bias(), options.stride(), options.padding(), options.dilation(), options.groups()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor conv_transpose1d( const Tensor& input, const Tensor& weight, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) { return torch::conv_transpose1d( input, weight, bias, stride, padding, output_padding, groups, dilation); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.conv_transpose1d /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::ConvTranspose1dFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1)); /// ``` inline Tensor conv_transpose1d( const Tensor& input, const Tensor& weight, const ConvTranspose1dFuncOptions& options = {}) { return detail::conv_transpose1d( input, weight, options.bias(), options.stride(), options.padding(), options.output_padding(), options.groups(), options.dilation()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor conv_transpose2d( const Tensor& input, const Tensor& weight, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) { return torch::conv_transpose2d( input, weight, bias, stride, padding, output_padding, groups, dilation); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.conv_transpose2d /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::ConvTranspose2dFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1)); /// ``` inline Tensor conv_transpose2d( const Tensor& input, const Tensor& weight, const ConvTranspose2dFuncOptions& options = {}) { return detail::conv_transpose2d( input, weight, options.bias(), options.stride(), options.padding(), options.output_padding(), options.groups(), options.dilation()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor conv_transpose3d( const Tensor& input, const Tensor& weight, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) { return torch::conv_transpose3d( input, weight, bias, stride, padding, output_padding, groups, dilation); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.conv_transpose3d /// about the exact behavior of this functional. 
/// /// See the documentation for /// `torch::nn::functional::ConvTranspose3dFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1)); /// ``` inline Tensor conv_transpose3d( const Tensor& input, const Tensor& weight, const ConvTranspose3dFuncOptions& options = {}) { return detail::conv_transpose3d( input, weight, options.bias(), options.stride(), options.padding(), options.output_padding(), options.groups(), options.dilation()); } } // namespace torch::nn::functional ```
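One detail worth illustrating is the `padding_unwrap` machinery above: it lets the padding option be either explicit sizes or the symbolic `torch::kValid` / `torch::kSame` enums. A short sketch (not part of the header; shapes illustrative, and note that `kSame` padding requires stride 1):

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 3, 8, 8});   // (N, C_in, H, W)
  auto w = torch::randn({16, 3, 3, 3});  // (C_out, C_in, kH, kW)

  // Symbolic padding: kSame preserves the spatial size.
  auto same = F::conv2d(x, w, F::Conv2dFuncOptions().stride(1).padding(torch::kSame));
  TORCH_CHECK(same.size(2) == 8 && same.size(3) == 8);

  // Explicit padding of 1 per side is equivalent for a 3x3 kernel.
  auto padded = F::conv2d(x, w, F::Conv2dFuncOptions().stride(1).padding(1));

  // Transposed convolution: note the swapped weight layout (C_in, C_out, kH, kW).
  auto wt = torch::randn({16, 3, 2, 2});
  auto up = F::conv_transpose2d(same, wt, F::ConvTranspose2dFuncOptions().stride(2));
  TORCH_CHECK(up.size(1) == 3 && up.size(2) == 16);
}
```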
=========================================================================================================================================================== SOURCE CODE FILE: distance.h LINES: 1 SIZE: 2.52 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\distance.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/options/distance.h> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor cosine_similarity( const Tensor& x1, const Tensor& x2, int64_t dim, double eps) { return torch::cosine_similarity(x1, x2, dim, eps); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.cosine_similarity /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::CosineSimilarityFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::cosine_similarity(input1, input2, /// F::CosineSimilarityFuncOptions().dim(1)); /// ``` inline Tensor cosine_similarity( const Tensor& x1, const Tensor& x2, const CosineSimilarityFuncOptions& options = {}) { return detail::cosine_similarity(x1, x2, options.dim(), options.eps()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor pairwise_distance( const Tensor& x1, const Tensor& x2, double p, double eps, bool keepdim) { return torch::pairwise_distance(x1, x2, p, eps, keepdim); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.pairwise_distance /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::PairwiseDistanceFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1)); /// ``` inline Tensor pairwise_distance( const Tensor& x1, const Tensor& x2, const PairwiseDistanceFuncOptions& options = {}) { return detail::pairwise_distance( x1, x2, options.p(), options.eps(), options.keepdim()); } // ============================================================================ /// Computes the p-norm distance between every pair of row vectors in the input. /// This function will be faster if the rows are contiguous. inline Tensor pdist(const Tensor& input, double p = 2.0) { return torch::pdist(input, p); } } // namespace torch::nn::functional ```
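A quick sketch showing the three distance helpers side by side (again illustrative only, assuming libtorch is linked):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto a = torch::randn({8, 128});
  auto b = torch::randn({8, 128});

  // Cosine similarity along dim 1: one scalar per row pair.
  auto cos = F::cosine_similarity(a, b, F::CosineSimilarityFuncOptions().dim(1));

  // Row-wise L1 distance between corresponding rows.
  auto l1 = F::pairwise_distance(a, b, F::PairwiseDistanceFuncOptions().p(1));

  // pdist: condensed distances over all row pairs of a single input,
  // giving n * (n - 1) / 2 = 28 values for n = 8 rows.
  auto d = F::pdist(a);
  std::cout << cos.sizes() << " " << l1.sizes() << " " << d.sizes() << "\n";
}
```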
========================================================================================================================================================== SOURCE CODE FILE: dropout.h LINES: 1 SIZE: 6.61 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\dropout.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/options/dropout.h> #include <utility> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor dropout(Tensor input, double p, bool training, bool inplace) { TORCH_CHECK( p >= 0. && p <= 1., "dropout probability has to be between 0 and 1, but got ", p); if (inplace) { return torch::dropout_(input, p, training); } else { return torch::dropout(input, p, training); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.dropout /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::DropoutFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::dropout(input, F::DropoutFuncOptions().p(0.5)); /// ``` inline Tensor dropout(Tensor input, const DropoutFuncOptions& options = {}) { return detail::dropout( std::move(input), options.p(), options.training(), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { template <int64_t unbatched_dim, int64_t batched_dim> inline Tensor _dropoutNd_helper( Tensor input, double p, bool training, bool inplace, const char* fn_name) { TORCH_CHECK( p >= 0. && p <= 1., "dropout probability has to be between 0 and 1, but got ", p); auto inp_dim = input.dim(); auto is_batched = inp_dim == batched_dim; if (!is_batched) { if (inplace) { input = input.unsqueeze_(0); } else { input = input.unsqueeze(0); } } Tensor result; if (inplace) { result = torch::feature_dropout_(input, p, training); } else { result = torch::feature_dropout(input, p, training); } if (!is_batched) { if (inplace) { result = result.squeeze_(0); } else { result = result.squeeze(0); } } return result; } inline Tensor dropout2d(Tensor input, double p, bool training, bool inplace) { return _dropoutNd_helper<3, 4>( std::move(input), p, training, inplace, "dropout2d"); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.dropout2d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::Dropout2dFuncOptions` /// class to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5)); /// ``` inline Tensor dropout2d( Tensor input, const Dropout2dFuncOptions& options = {}) { return detail::dropout2d( std::move(input), options.p(), options.training(), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor dropout3d(Tensor input, double p, bool training, bool inplace) { return _dropoutNd_helper<4, 5>( std::move(input), p, training, inplace, "dropout3d"); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.dropout3d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::Dropout3dFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5)); /// ``` inline Tensor dropout3d( Tensor input, const Dropout3dFuncOptions& options = {}) { return detail::dropout3d( std::move(input), options.p(), options.training(), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor alpha_dropout( Tensor input, double p, bool training, bool inplace) { if (p < 0. || p > 1.) { TORCH_CHECK( false, "dropout probability has to be between 0 and 1, but got ", p); } return inplace ? torch::alpha_dropout_(input, p, training) : torch::alpha_dropout(input, p, training); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.alpha_dropout /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::AlphaDropoutFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::alpha_dropout(input, /// F::AlphaDropoutFuncOptions().p(0.5).training(false)); /// ``` inline Tensor alpha_dropout( Tensor input, const AlphaDropoutFuncOptions& options = {}) { return detail::alpha_dropout( std::move(input), options.p(), options.training(), options.inplace()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor feature_alpha_dropout( Tensor input, double p, bool training, bool inplace) { if (p < 0. || p > 1.) { TORCH_CHECK( false, "dropout probability has to be between 0 and 1, but got ", p); } return inplace ? torch::feature_alpha_dropout_(input, p, training) : torch::feature_alpha_dropout(input, p, training); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.feature_alpha_dropout /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::FeatureAlphaDropoutFuncOptions` class to learn what /// optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::feature_alpha_dropout(input, /// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false)); /// ``` inline Tensor feature_alpha_dropout( Tensor input, const FeatureAlphaDropoutFuncOptions& options = {}) { return detail::feature_alpha_dropout( std::move(input), options.p(), options.training(), options.inplace()); } } // namespace torch::nn::functional ```
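All of the dropout variants above gate on a `training` flag and an optional `inplace` mode. A minimal sketch under the usual assumptions (libtorch linked; shapes illustrative):

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::ones({2, 3, 8, 8});

  // Active dropout: zeroes elements with probability p and scales by 1/(1-p).
  auto train_out = F::dropout(x, F::DropoutFuncOptions().p(0.5).training(true));

  // With training(false) the input passes through unchanged.
  auto eval_out = F::dropout(x, F::DropoutFuncOptions().p(0.5).training(false));
  TORCH_CHECK(eval_out.equal(x));

  // dropout2d drops whole (sample, channel) feature maps of a 4-D input;
  // a 3-D input would be treated as unbatched, per _dropoutNd_helper above.
  auto channels = F::dropout2d(x, F::Dropout2dFuncOptions().p(0.5));
}
```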
============================================================================================================================================================ SOURCE CODE FILE: embedding.h LINES: 1 SIZE: 6.41 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\embedding.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/options/embedding.h> namespace torch::nn::functional { inline Tensor one_hot(const Tensor& tensor, int64_t num_classes = -1) { return torch::one_hot(tensor, num_classes); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline void _no_grad_embedding_renorm_( Tensor weight, const Tensor& input, float max_norm, float norm_type) { torch::NoGradGuard no_grad; torch::embedding_renorm_(weight, input, max_norm, norm_type); } inline Tensor embedding( const Tensor& input, const Tensor& weight, std::optional<int64_t> padding_idx, std::optional<double> max_norm, double norm_type, bool scale_grad_by_freq, bool sparse) { auto input_ = input; if (padding_idx != std::nullopt) { if (*padding_idx > 0) { TORCH_CHECK( *padding_idx < weight.size(0), "Padding_idx must be within num_embeddings"); } else if (*padding_idx < 0) { TORCH_CHECK( *padding_idx >= -weight.size(0), "Padding_idx must be within num_embeddings"); padding_idx = weight.size(0) + *padding_idx; } } else { padding_idx = -1; } if (max_norm != std::nullopt) { input_ = input_.contiguous(); // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type); } return torch::embedding( weight, input_, *padding_idx, scale_grad_by_freq, sparse); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.embedding /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::EmbeddingFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::embedding(input, weight, /// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true)); /// ``` inline Tensor embedding( const Tensor& input, const Tensor& weight, const EmbeddingFuncOptions& options = {}) { return detail::embedding( input, weight, options.padding_idx(), options.max_norm(), options.norm_type(), options.scale_grad_by_freq(), options.sparse()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor embedding_bag( const Tensor& input, const Tensor& weight, const Tensor& offsets, std::optional<double> max_norm, double norm_type, bool scale_grad_by_freq, EmbeddingBagMode mode, bool sparse, const Tensor& per_sample_weights, bool include_last_offset, std::optional<int64_t> padding_idx) { auto input_ = input; auto offsets_ = offsets; auto per_sample_weights_ = per_sample_weights; TORCH_CHECK( !per_sample_weights_.defined() || input_.sizes() == per_sample_weights_.sizes(), "embedding_bag: If per_sample_weights (", per_sample_weights_.sizes(), ") is not null, then it must have the same shape as the input (", input_.sizes(), ")"); if (input_.dim() == 2) { TORCH_CHECK( !offsets_.defined(), "If input is 2D, then offsets has to be null, as input is treated as a mini-batch of fixed length sequences. 
However, found offsets of type Tensor"); offsets_ = torch::arange( 0, input_.numel(), input_.size(1), torch::TensorOptions().dtype(torch::kLong).device(input_.device())); input_ = input_.reshape(-1); if (per_sample_weights_.defined()) { per_sample_weights_ = per_sample_weights_.reshape(-1); } } else if (input_.dim() == 1) { TORCH_CHECK( offsets_.defined(), "offsets has to be a 1D Tensor but got null"); TORCH_CHECK(offsets_.dim() == 1, "offsets has to be a 1D Tensor"); } else { TORCH_CHECK( false, "input has to be 1D or 2D Tensor, but got Tensor of dimension ", input_.dim()); } int mode_enum = 0; if (std::holds_alternative<enumtype::kSum>(mode)) { mode_enum = 0; } else if (std::holds_alternative<enumtype::kMean>(mode)) { mode_enum = 1; } else if (std::holds_alternative<enumtype::kMax>(mode)) { mode_enum = 2; TORCH_CHECK( !scale_grad_by_freq, "max mode does not support scaling the gradient by the frequency"); TORCH_CHECK(!sparse, "max mode does not support sparse weights"); } else { TORCH_CHECK(false, "mode has to be one of sum, mean or max"); } if (max_norm != std::nullopt) { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type); } TORCH_CHECK( !per_sample_weights_.defined() || std::get_if<enumtype::kSum>(&mode), "embedding_bag: per_sample_weights was not null. ", "per_sample_weights is only supported for mode='kSum' (got mode='", torch::enumtype::get_enum_name(mode), "'). Please open a feature request on GitHub."); return std::get<0>(torch::embedding_bag( weight, input_, offsets_, scale_grad_by_freq, mode_enum, sparse, per_sample_weights_, include_last_offset, padding_idx)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.embedding_bag /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::EmbeddingBagFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::embedding_bag(input, weight, /// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets)); /// ``` inline Tensor embedding_bag( const Tensor& input, const Tensor& weight, const EmbeddingBagFuncOptions& options = {}) { return detail::embedding_bag( input, weight, options.offsets(), options.max_norm(), options.norm_type(), options.scale_grad_by_freq(), options.mode(), options.sparse(), options.per_sample_weights(), options.include_last_offset(), options.padding_idx()); } } // namespace torch::nn::functional ```
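The `detail::embedding_bag` branches above accept two input layouts: a 2-D input treated as fixed-length bags, or a 1-D input with explicit `offsets`. A small sketch of both lookup styles (index values and shapes are illustrative; assumes libtorch is linked):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  // A 10-entry embedding table of 4-dimensional vectors.
  auto weight = torch::randn({10, 4});

  // Plain lookup: every index becomes its row of `weight` -> (2, 3, 4).
  auto idx = torch::tensor({1, 2, 4, 4, 3, 9}, torch::kLong).reshape({2, 3});
  auto emb = F::embedding(idx, weight);

  // 1-D input with explicit offsets: bags are input[0:3] and input[3:6],
  // each reduced with 'sum' -> (2, 4).
  auto input = torch::tensor({1, 2, 4, 5, 4, 3}, torch::kLong);
  auto offsets = torch::tensor({0, 3}, torch::kLong);
  auto bags = F::embedding_bag(
      input, weight,
      F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));
  std::cout << emb.sizes() << " " << bags.sizes() << "\n";
}
```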
======================================================================================================================================================= SOURCE CODE FILE: fold.h LINES: 1 SIZE: 2.77 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\fold.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/options/fold.h> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor fold( const Tensor& input, ExpandingArray<2> output_size, ExpandingArray<2> kernel_size, ExpandingArray<2> dilation, ExpandingArray<2> padding, ExpandingArray<2> stride) { if (input.dim() == 3 || input.dim() == 2) { return torch::col2im( input, output_size, kernel_size, dilation, padding, stride); } else { TORCH_CHECK( false, "Input Error: Only unbatched (2D) or batched (3D) input Tensors are supported " "(got ", input.dim(), "D)"); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.fold /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::FoldFuncOptions` class to /// learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2})); /// ``` inline Tensor fold(const Tensor& input, const FoldFuncOptions& options) { return detail::fold( input, options.output_size(), options.kernel_size(), options.dilation(), options.padding(), options.stride()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor unfold( const Tensor& input, ExpandingArray<2> kernel_size, ExpandingArray<2> dilation, ExpandingArray<2> padding, ExpandingArray<2> stride) { if (input.dim() == 4) { return torch::im2col(input, kernel_size, dilation, padding, stride); } else { TORCH_CHECK( false, "Input Error: Only 4D input Tensors are supported " "(got ", input.dim(), "D)"); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.unfold /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::UnfoldFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2)); /// ``` inline Tensor unfold(const Tensor& input, const UnfoldFuncOptions& options) { return detail::unfold( input, options.kernel_size(), options.dilation(), options.padding(), options.stride()); } } // namespace torch::nn::functional ```
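Since `fold` and `unfold` are exact inverses when the extracted patches do not overlap, a round trip makes a compact sanity check. A sketch under the same assumptions as the earlier examples:

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  // A 4-D (N, C, H, W) input, as required by unfold / im2col.
  auto img = torch::arange(16.).reshape({1, 1, 4, 4});

  // Extract non-overlapping 2x2 patches: (N, C*kH*kW, L) = (1, 4, 4).
  auto patches = F::unfold(img, F::UnfoldFuncOptions({2, 2}).stride(2));

  // fold / col2im scatters the patches back onto a 4x4 canvas; with no
  // overlap, each output element receives exactly one value, so the
  // round trip is exact.
  auto restored = F::fold(patches, F::FoldFuncOptions({4, 4}, {2, 2}).stride(2));
  TORCH_CHECK(restored.equal(img));
}
```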
=============================================================================================================================================================== SOURCE CODE FILE: instancenorm.h LINES: 1 SIZE: 1.58 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\instancenorm.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/options/instancenorm.h> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor instance_norm( const Tensor& input, const Tensor& running_mean, const Tensor& running_var, const Tensor& weight, const Tensor& bias, bool use_input_stats, double momentum, double eps) { return torch::instance_norm( input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, at::globalContext().userEnabledCuDNN()); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.instance_norm /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::InstanceNormFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::instance_norm(input, /// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5)); /// ``` inline Tensor instance_norm( const Tensor& input, const InstanceNormFuncOptions& options = {}) { return detail::instance_norm( input, options.running_mean(), options.running_var(), options.weight(), options.bias(), options.use_input_stats(), options.momentum(), options.eps()); } } // namespace torch::nn::functional ```
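Unlike `batch_norm` above, `instance_norm` normalizes each (sample, channel) plane by its own statistics, so it can run without any running buffers when input statistics are used; this sketch assumes `use_input_stats` defaults to true in `InstanceNormFuncOptions`, and the tolerance is illustrative:

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  auto input = torch::randn({4, 3, 8, 8});

  // No running_mean/running_var supplied: each (N, C) plane is
  // normalized by its own mean and variance.
  auto out = F::instance_norm(input, F::InstanceNormFuncOptions().eps(1e-5));

  // Every plane should now have (near-)zero mean.
  auto per_plane_mean = out.mean(/*dim=*/{2, 3});
  TORCH_CHECK(per_plane_mean.abs().max().item<double>() < 1e-4);
}
```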
========================================================================================================================================================= SOURCE CODE FILE: linear.h LINES: 1 SIZE: 0.78 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\linear.h ENCODING: utf-8 ```h #pragma once #include <torch/types.h> namespace torch::nn::functional { inline Tensor bilinear( const Tensor& input1, const Tensor& input2, const Tensor& weight, const Tensor& bias = Tensor()) { return torch::bilinear(input1, input2, weight, bias); } // ============================================================================ inline Tensor linear( const Tensor& input, const Tensor& weight, const Tensor& bias = {}) { if (input.dim() == 2 && bias.defined()) { // fused op is marginally faster return torch::addmm(bias, input, weight.t()); } else { auto output = input.matmul(weight.t()); if (bias.defined()) { output += bias; } return output; } } } // namespace torch::nn::functional ```
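The comment in `linear` above notes the fused `addmm` fast path for 2-D inputs with a bias; higher-rank inputs take the `matmul` branch instead. A sketch of both entry points (shapes illustrative; assumes libtorch is linked):

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  auto w = torch::randn({5, 3});  // (out_features, in_features)
  auto b = torch::randn({5});

  // 2-D input + bias: hits the fused addmm path.
  auto y2d = F::linear(torch::randn({4, 3}), w, b);
  TORCH_CHECK(y2d.size(0) == 4 && y2d.size(1) == 5);

  // 3-D input: falls back to matmul + bias add, batching over dim 0.
  auto y3d = F::linear(torch::randn({2, 4, 3}), w, b);
  TORCH_CHECK(y3d.size(2) == 5);

  // bilinear: z_k = x1 . A_k . x2 + b_k for each output feature k.
  auto A = torch::randn({5, 3, 6});  // (out, in1_features, in2_features)
  auto z = F::bilinear(torch::randn({4, 3}), torch::randn({4, 6}), A, b);
  TORCH_CHECK(z.size(0) == 4 && z.size(1) == 5);
}
```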
======================================================================================================================================================= SOURCE CODE FILE: loss.h LINES: 1 SIZE: 32.11 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\loss.h ENCODING: utf-8 ```h #pragma once #include <ATen/ExpandUtils.h> #include <torch/nn/functional/activation.h> #include <torch/nn/options/loss.h> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor l1_loss( const Tensor& input, const Tensor& target, L1LossFuncOptions::reduction_t reduction) { return torch::l1_loss(input, target, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.l1_loss /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::L1LossFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone)); /// ``` inline Tensor l1_loss( const Tensor& input, const Tensor& target, const L1LossFuncOptions& options = {}) { return detail::l1_loss(input, target, options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor kl_div( const Tensor& input, const Tensor& target, KLDivFuncOptions::reduction_t reduction, bool log_target = false) { torch::Reduction::Reduction reduction_enum{}; if (std::holds_alternative<enumtype::kMean>(reduction)) { TORCH_WARN( "reduction: 'mean' divides the total loss by both the batch size and the support size. " "'batchmean' divides only by the batch size, and aligns with the KL div math definition. " "'mean' will be changed to behave the same as 'batchmean' in the next major release."); } // special case for batchmean if (std::holds_alternative<enumtype::kBatchMean>(reduction)) { reduction_enum = torch::Reduction::Sum; } else { reduction_enum = enumtype::reduction_get_enum(reduction); } auto reduced = torch::kl_div(input, target, reduction_enum, log_target); if (std::holds_alternative<enumtype::kBatchMean>(reduction) && input.dim() != 0) { reduced = reduced / input.sizes()[0]; } return reduced; } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.kl_div /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::KLDivFuncOptions` class to /// learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::kl_div(input, target, /// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false)); /// ``` inline Tensor kl_div( const Tensor& input, const Tensor& target, const KLDivFuncOptions& options = {}) { return detail::kl_div( input, target, options.reduction(), options.log_target()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor mse_loss( const Tensor& input, const Tensor& target, MSELossFuncOptions::reduction_t reduction) { if (!(target.sizes() == input.sizes())) { TORCH_WARN( "Using a target size (", target.sizes(), ") that is different to the input size (", input.sizes(), "). ", "This will likely lead to incorrect results due to broadcasting. ", "Please ensure they have the same size."); } std::vector<torch::Tensor> broadcast_tensors = torch::broadcast_tensors({input, target}); auto expanded_input = broadcast_tensors[0]; auto expanded_target = broadcast_tensors[1]; return torch::mse_loss( expanded_input, expanded_target, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.mse_loss /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::MSELossFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone)); /// ``` inline Tensor mse_loss( const Tensor& input, const Tensor& target, const MSELossFuncOptions& options = {}) { return detail::mse_loss(input, target, options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor binary_cross_entropy( const Tensor& input, const Tensor& target, const Tensor& weight, BinaryCrossEntropyFuncOptions::reduction_t reduction) { auto reduction_enum = enumtype::reduction_get_enum(reduction); if (target.sizes() != input.sizes()) { TORCH_CHECK( false, "Using a target size (", target.sizes(), ") ", "that is different to the input size (", input.sizes(), ") is deprecated. ", "Please ensure they have the same size."); } auto weight_ = weight; if (weight_.defined()) { auto new_size = at::infer_size(target.sizes(), weight_.sizes()); weight_ = weight_.expand(new_size); } return torch::binary_cross_entropy(input, target, weight_, reduction_enum); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.binary_cross_entropy /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::BinaryCrossEntropyFuncOptions` class to learn what /// optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::binary_cross_entropy(input, target, /// F::BinaryCrossEntropyFuncOptions().weight(weight)); /// ``` inline Tensor binary_cross_entropy( const Tensor& input, const Tensor& target, const BinaryCrossEntropyFuncOptions& options = {}) { return detail::binary_cross_entropy( input, target, options.weight(), options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor hinge_embedding_loss( const Tensor& input, const Tensor& target, double margin, HingeEmbeddingLossFuncOptions::reduction_t reduction) { return torch::hinge_embedding_loss( input, target, margin, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.hinge_embedding_loss /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::HingeEmbeddingLossFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::hinge_embedding_loss(input, target, /// F::HingeEmbeddingLossFuncOptions().margin(2)); /// ``` inline Tensor hinge_embedding_loss( const Tensor& input, const Tensor& target, const HingeEmbeddingLossFuncOptions& options = {}) { return detail::hinge_embedding_loss( input, target, options.margin(), options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor multi_margin_loss( const Tensor& input, const Tensor& target, int64_t p, double margin, const Tensor& weight, MultiMarginLossFuncOptions::reduction_t reduction) { TORCH_CHECK(p == 1 || p == 2, "only p == 1 and p == 2 supported"); if (weight.defined()) { TORCH_CHECK(weight.dim() == 1, "weight must be one-dimensional"); } return torch::multi_margin_loss( input, target, p, margin, weight, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.multi_margin_loss /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::MultiMarginLossFuncOptions` class to learn what /// optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::multi_margin_loss(input, target, /// F::MultiMarginLossFuncOptions().margin(2).weight(weight)); /// ``` inline Tensor multi_margin_loss( const Tensor& input, const Tensor& target, const MultiMarginLossFuncOptions& options = {}) { return detail::multi_margin_loss( input, target, options.p(), options.margin(), options.weight(), options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor cosine_embedding_loss( const Tensor& input1, const Tensor& input2, const Tensor& target, double margin, CosineEmbeddingLossFuncOptions::reduction_t reduction) { return torch::cosine_embedding_loss( input1, input2, target, margin, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.cosine_embedding_loss /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::CosineEmbeddingLossFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::cosine_embedding_loss(input1, input2, target, /// F::CosineEmbeddingLossFuncOptions().margin(0.5)); /// ``` inline Tensor cosine_embedding_loss( const Tensor& input1, const Tensor& input2, const Tensor& target, const CosineEmbeddingLossFuncOptions& options = {}) { return detail::cosine_embedding_loss( input1, input2, target, options.margin(), options.reduction()); } // ============================================================================ inline Tensor _smooth_l1_loss( const Tensor& input, const Tensor& target, double beta = 1.) { auto t = torch::abs(input - target); return torch::where(t < beta, 0.5 * torch::pow(t, 2) / beta, t - 0.5 * beta); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor smooth_l1_loss( const Tensor& input, const Tensor& target, SmoothL1LossFuncOptions::reduction_t reduction, std::optional<double> beta_opt = std::nullopt) { if (target.sizes() != input.sizes()) { TORCH_WARN( "Using a target size (", target.sizes(), ") that is different to the input size (", input.sizes(), "). ", "This will likely lead to incorrect results due to broadcasting. ", "Please ensure they have the same size."); } double beta = beta_opt.value_or(1.0); std::vector<Tensor> expanded_tensors = torch::broadcast_tensors({input, target}); return torch::smooth_l1_loss( expanded_tensors[0], expanded_tensors[1], enumtype::reduction_get_enum(reduction), beta); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.smooth_l1_loss /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::SmoothL1LossFuncOptions` /// class to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone)); /// ``` inline Tensor smooth_l1_loss( const Tensor& input, const Tensor& target, const SmoothL1LossFuncOptions& options = {}) { return detail::smooth_l1_loss( input, target, options.reduction(), options.beta()); } /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.smooth_l1_loss /// about the exact behavior of this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::smooth_l1_loss(input, target, /*options=*/torch::kNone, /*beta=*/0.5); /// ``` inline Tensor smooth_l1_loss( const Tensor& input, const Tensor& target, const SmoothL1LossFuncOptions& options, double beta) { TORCH_CHECK( !options.beta().has_value(), "expected beta not to be provided in 'options', but got ", options.beta()); return detail::smooth_l1_loss(input, target, options.reduction(), beta); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor huber_loss( const Tensor& input, const Tensor& target, HuberLossFuncOptions::reduction_t reduction, double delta = 1.) { if (target.sizes() != input.sizes()) { TORCH_WARN( "Using a target size (", target.sizes(), ") that is different to the input size (", input.sizes(), "). ", "This will likely lead to incorrect results due to broadcasting. ", "Please ensure they have the same size."); } std::vector<Tensor> expanded_tensors = torch::broadcast_tensors({input, target}); return torch::huber_loss( expanded_tensors[0], expanded_tensors[1], enumtype::reduction_get_enum(reduction), delta); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.huber_loss /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::HuberLossFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::huber_loss(input, target, /// F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5)); /// ``` inline Tensor huber_loss( const Tensor& input, const Tensor& target, const HuberLossFuncOptions& options = {}) { return detail::huber_loss( input, target, options.reduction(), options.delta()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor multilabel_margin_loss( const Tensor& input, const Tensor& target, MultilabelMarginLossFuncOptions::reduction_t reduction) { return torch::multilabel_margin_loss( input, target, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.multilabel_margin_loss /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::MultilabelMarginLossFuncOptions` class to learn what /// optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::multilabel_margin_loss(input, target, /// F::MultilabelMarginLossFuncOptions(torch::kNone)); /// ``` inline Tensor multilabel_margin_loss( const Tensor& input, const Tensor& target, const MultilabelMarginLossFuncOptions& options = {}) { return detail::multilabel_margin_loss(input, target, options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor soft_margin_loss( const Tensor& input, const Tensor& target, SoftMarginLossFuncOptions::reduction_t reduction) { return torch::soft_margin_loss( input, target, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.soft_margin_loss /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::SoftMarginLossFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::soft_margin_loss(input, target, /// F::SoftMarginLossFuncOptions(torch::kNone)); /// ``` inline Tensor soft_margin_loss( const Tensor& input, const Tensor& target, const SoftMarginLossFuncOptions& options = {}) { return detail::soft_margin_loss(input, target, options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor multilabel_soft_margin_loss( const Tensor& input, const Tensor& target, const Tensor& weight, MultilabelSoftMarginLossFuncOptions::reduction_t reduction) { auto loss = -(target * torch::log_sigmoid(input) + (1 - target) * torch::log_sigmoid(-input)); if (weight.defined()) { loss = loss * weight; } auto class_dim = input.dim() - 1; auto C = input.size(class_dim); loss = loss.sum(class_dim) / C; // only return N loss values Tensor ret; if (std::holds_alternative<enumtype::kNone>(reduction)) { ret = loss; } else if (std::holds_alternative<enumtype::kMean>(reduction)) { ret = loss.mean(); } else if (std::holds_alternative<enumtype::kSum>(reduction)) { ret = loss.sum(); } else { ret = input; TORCH_INTERNAL_ASSERT( false, enumtype::get_enum_name(reduction), " is not valid"); } return ret; } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.multilabel_soft_margin_loss /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::MultilabelSoftMarginLossFuncOptions` class to learn /// what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::multilabel_soft_margin_loss(input, target, /// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight)); /// ``` inline Tensor multilabel_soft_margin_loss( const Tensor& input, const Tensor& target, const MultilabelSoftMarginLossFuncOptions& options = {}) { return detail::multilabel_soft_margin_loss( input, target, options.weight(), options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor triplet_margin_loss( const Tensor& anchor, const Tensor& positive, const Tensor& negative, double margin, double p, double eps, bool swap, TripletMarginLossFuncOptions::reduction_t reduction) { return torch::triplet_margin_loss( anchor, positive, negative, margin, p, eps, swap, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.triplet_margin_loss /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::TripletMarginLossFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::triplet_margin_loss(anchor, positive, negative, /// F::TripletMarginLossFuncOptions().margin(1.0)); /// ``` inline Tensor triplet_margin_loss( const Tensor& anchor, const Tensor& positive, const Tensor& negative, const TripletMarginLossFuncOptions& options = {}) { return detail::triplet_margin_loss( anchor, positive, negative, options.margin(), options.p(), options.eps(), options.swap(), options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor triplet_margin_with_distance_loss( const Tensor& anchor, const Tensor& positive, const Tensor& negative, std::optional<TripletMarginWithDistanceLossFuncOptions::distance_function_t> distance_function, double margin, bool swap, TripletMarginWithDistanceLossFuncOptions::reduction_t reduction) { Tensor dist_pos, dist_neg; if (distance_function.has_value()) { auto distance_function_impl = distance_function.value(); dist_pos = distance_function_impl(anchor, positive); dist_neg = distance_function_impl(anchor, negative); } else { dist_pos = pairwise_distance(anchor, positive); dist_neg = pairwise_distance(anchor, negative); } if (swap) { Tensor dist_swap; if (distance_function.has_value()) { dist_swap = distance_function.value()(positive, negative); } else { dist_swap = pairwise_distance(positive, negative); } dist_neg = torch::min(dist_neg, dist_swap); } auto loss = torch::clamp_min(dist_pos - dist_neg + margin, 0); Tensor ret; if (std::holds_alternative<enumtype::kNone>(reduction)) { ret = loss; } else if (std::holds_alternative<enumtype::kMean>(reduction)) { ret = loss.mean(); } else if (std::holds_alternative<enumtype::kSum>(reduction)) { ret = loss.sum(); } else { ret = anchor; TORCH_INTERNAL_ASSERT( false, enumtype::get_enum_name(reduction), " is not valid"); } return ret; } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.triplet_margin_with_distance_loss /// about the exact behavior of this functional. 
/// /// See the documentation for /// `torch::nn::functional::TripletMarginWithDistanceLossFuncOptions` class to /// learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::triplet_margin_with_distance_loss(anchor, positive, negative, /// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0)); /// ``` inline Tensor triplet_margin_with_distance_loss( const Tensor& anchor, const Tensor& positive, const Tensor& negative, const TripletMarginWithDistanceLossFuncOptions& options = {}) { return detail::triplet_margin_with_distance_loss( anchor, positive, negative, options.distance_function(), options.margin(), options.swap(), options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor ctc_loss( const Tensor& log_probs, const Tensor& targets, const Tensor& input_lengths, const Tensor& target_lengths, int64_t blank, CTCLossFuncOptions::reduction_t reduction, bool zero_infinity) { return torch::ctc_loss( log_probs, targets, input_lengths, target_lengths, blank, enumtype::reduction_get_enum(reduction), zero_infinity); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.ctc_loss /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::CTCLossFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::ctc_loss(log_probs, targets, input_lengths, target_lengths, /// F::CTCLossFuncOptions().reduction(torch::kNone)); /// ``` inline Tensor ctc_loss( const Tensor& log_probs, const Tensor& targets, const Tensor& input_lengths, const Tensor& target_lengths, const CTCLossFuncOptions& options = {}) { return detail::ctc_loss( log_probs, targets, input_lengths, target_lengths, options.blank(), options.reduction(), options.zero_infinity()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor poisson_nll_loss( const Tensor& input, const Tensor& target, bool log_input, bool full, double eps, PoissonNLLLossFuncOptions::reduction_t reduction) { return torch::poisson_nll_loss( input, target, log_input, full, eps, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.poisson_nll_loss /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::PoissonNLLLossFuncOptions` /// class to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::poisson_nll_loss(input, target, /// F::PoissonNLLLossFuncOptions().reduction(torch::kNone)); /// ``` inline Tensor poisson_nll_loss( const Tensor& input, const Tensor& target, const PoissonNLLLossFuncOptions& options = {}) { return detail::poisson_nll_loss( input, target, options.log_input(), options.full(), options.eps(), options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor margin_ranking_loss( const Tensor& input1, const Tensor& input2, const Tensor& target, double margin, MarginRankingLossFuncOptions::reduction_t reduction) { TORCH_CHECK( input1.dim() == input2.dim() && input1.dim() == target.dim(), "margin_ranking_loss : All input tensors should have same dimension but got sizes: " "input1: ", input1.sizes(), ", input2: ", input2.sizes(), ", target: ", target.sizes()); return torch::margin_ranking_loss( input1, input2, target, margin, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.margin_ranking_loss /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::MarginRankingLossFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::margin_ranking_loss(input1, input2, target, /// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum)); /// ``` inline Tensor margin_ranking_loss( const Tensor& input1, const Tensor& input2, const Tensor& target, const MarginRankingLossFuncOptions& options = {}) { return detail::margin_ranking_loss( input1, input2, target, options.margin(), options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor nll_loss( const Tensor& input, const Tensor& target, const Tensor& weight, int64_t ignore_index, const NLLLossFuncOptions::reduction_t& reduction) { if (input.dim() < 2) { TORCH_CHECK(false, "Expected 2 or more dimensions (got ", input.dim(), ")"); } if (input.sizes()[0] != target.sizes()[0]) { TORCH_CHECK( false, "Expected input batch_size (", input.sizes()[0], ") to match target batch_size (", target.sizes()[0], ")."); } return torch::nll_loss_nd( input, target, weight, enumtype::reduction_get_enum(reduction), ignore_index); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.nll_loss /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::NLLLossFuncOptions` class /// to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::nll_loss(input, target, /// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean)); /// ``` inline Tensor nll_loss( const Tensor& input, const Tensor& target, const NLLLossFuncOptions& options = {}) { return detail::nll_loss( input, target, options.weight(), options.ignore_index(), options.reduction()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor cross_entropy( const Tensor& input, const Tensor& target, const Tensor& weight, int64_t ignore_index, CrossEntropyFuncOptions::reduction_t reduction, double label_smoothing) { return torch::cross_entropy_loss( input, target, weight, enumtype::reduction_get_enum(reduction), ignore_index, label_smoothing); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.cross_entropy /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::CrossEntropyFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::cross_entropy(input, target, /// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean)); /// ``` inline Tensor cross_entropy( const Tensor& input, const Tensor& target, const CrossEntropyFuncOptions& options = {}) { return detail::cross_entropy( input, target, options.weight(), options.ignore_index(), options.reduction(), options.label_smoothing()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor binary_cross_entropy_with_logits( const Tensor& input, const Tensor& target, const Tensor& weight, BinaryCrossEntropyWithLogitsFuncOptions::reduction_t reduction, const Tensor& pos_weight) { TORCH_CHECK( target.sizes() == input.sizes(), "Target size (", target.sizes(), ") must be the same as input size (", input.sizes(), ")"); return torch::binary_cross_entropy_with_logits( input, target, weight, pos_weight, enumtype::reduction_get_enum(reduction)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.binary_cross_entropy_with_logits /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions` class to /// learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::binary_cross_entropy_with_logits(input, target, /// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum)); /// ``` inline Tensor binary_cross_entropy_with_logits( const Tensor& input, const Tensor& target, const BinaryCrossEntropyWithLogitsFuncOptions& options = {}) { return detail::binary_cross_entropy_with_logits( input, target, options.weight(), options.reduction(), options.pos_weight()); } } // namespace torch::nn::functional ```
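
The loss functionals above pair naturally with autograd. The following is a minimal usage sketch, not part of the packaged header: it assumes a linked recent libtorch, and every tensor name and shape in it is arbitrary and chosen for illustration.

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  // smooth_l1_loss / huber_loss expect input and target of the same shape.
  auto input = torch::randn({3, 4}, torch::requires_grad());
  auto target = torch::randn({3, 4});
  auto smooth = F::smooth_l1_loss(input, target);
  auto huber =
      F::huber_loss(input, target, F::HuberLossFuncOptions().delta(0.5));

  // cross_entropy / nll_loss take (N, C) scores and N class indices in [0, C).
  auto logits = torch::randn({3, 5}, torch::requires_grad());
  auto classes = torch::randint(5, {3}, torch::kLong);
  auto ce = F::cross_entropy(logits, classes);
  auto nll = F::nll_loss(torch::log_softmax(logits, /*dim=*/1), classes);

  // binary_cross_entropy_with_logits takes a float target of the same size.
  auto bce = F::binary_cross_entropy_with_logits(
      input,
      torch::rand({3, 4}),
      F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kSum));

  // All reductions above produce scalars, so their sum is differentiable.
  (smooth + huber + ce + nll + bce).backward();
}
```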
================================================================================================================================================================ SOURCE CODE FILE: normalization.h LINES: 1 SIZE: 6.03 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\normalization.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/functional/padding.h> #include <torch/nn/functional/pooling.h> #include <torch/nn/options/normalization.h> #include <torch/types.h> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor normalize( const Tensor& input, double p, int64_t dim, double eps, std::optional<Tensor> out) { if (out == std::nullopt) { auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); return input / denom; } else { auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); return torch::div_out(*out, input, denom); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.normalize /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::NormalizeFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1)); /// ``` inline Tensor normalize( const Tensor& input, NormalizeFuncOptions options = {}) { return detail::normalize( input, options.p(), options.dim(), options.eps(), options.out()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor layer_norm( const Tensor& input, const std::vector<int64_t>& normalized_shape, const Tensor& weight, const Tensor& bias, double eps) { return torch::layer_norm(input, normalized_shape, weight, bias, eps); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.layer_norm /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::LayerNormFuncOptions` /// class to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5)); /// ``` inline Tensor layer_norm( const Tensor& input, const LayerNormFuncOptions& options) { return detail::layer_norm( input, options.normalized_shape(), options.weight(), options.bias(), options.eps()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor local_response_norm( const Tensor& input, int64_t size, double alpha, double beta, double k) { auto dim = input.dim(); TORCH_CHECK( dim >= 3, "Expected 3D or higher dimensionality input (got ", dim, " dimensions)"); auto div = input.mul(input).unsqueeze(1); if (dim == 3) { div = detail::pad( div, /*pad=*/{0, 0, size / 2, (size - 1) / 2}, /*mode=*/torch::kConstant, /*value=*/0); div = detail::avg_pool2d( div, /*kernel_size=*/{size, 1}, /*stride=*/1, /*padding=*/0, /*ceil_mode=*/false, /*count_include_pad=*/true, /*divisor_override=*/std::nullopt) .squeeze(1); } else { auto sizes = input.sizes(); div = div.view({sizes[0], 1, sizes[1], sizes[2], -1}); div = detail::pad( div, /*pad=*/{0, 0, 0, 0, size / 2, (size - 1) / 2}, /*mode=*/torch::kConstant, /*value=*/0); div = detail::avg_pool3d( div, /*kernel_size=*/{size, 1, 1}, /*stride=*/1, /*padding=*/0, /*ceil_mode=*/false, /*count_include_pad=*/true, /*divisor_override=*/std::nullopt) .squeeze(1); div = div.view(sizes); } div = div.mul(alpha).add(k).pow(beta); return input / div; } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.local_response_norm /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::LocalResponseNormFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2)); /// ``` inline Tensor local_response_norm( const Tensor& input, const LocalResponseNormFuncOptions& options) { return detail::local_response_norm( input, options.size(), options.alpha(), options.beta(), options.k()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor group_norm( const Tensor& input, int64_t num_groups, const Tensor& weight, const Tensor& bias, double eps) { return torch::group_norm( input, num_groups, weight, bias, eps, at::globalContext().userEnabledCuDNN()); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.group_norm /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::GroupNormFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5)); /// ``` inline Tensor group_norm( const Tensor& input, const GroupNormFuncOptions& options) { return detail::group_norm( input, options.num_groups(), options.weight(), options.bias(), options.eps()); } } // namespace torch::nn::functional ```
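
A short sketch of the normalization functionals above; illustrative only (not part of the header), assuming a linked libtorch and arbitrary shapes:

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  auto x = torch::randn({4, 8, 16});

  // L2-normalize along the last dimension.
  auto unit = F::normalize(x, F::NormalizeFuncOptions().p(2).dim(-1));

  // layer_norm normalizes over the trailing shape; weight/bias are optional.
  auto ln = F::layer_norm(x, F::LayerNormFuncOptions({8, 16}).eps(1e-5));

  // group_norm needs (N, C, ...) input and num_groups dividing C (here 4 | 8).
  auto gn = F::group_norm(x, F::GroupNormFuncOptions(4));

  // local_response_norm requires 3D-or-higher input.
  auto lrn = F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
}
```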
========================================================================================================================================================== SOURCE CODE FILE: padding.h LINES: 1 SIZE: 1.69 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\padding.h ENCODING: utf-8 ```h #pragma once #include <ATen/PadNd.h> #include <torch/nn/options/padding.h> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor pad( const Tensor& input, IntArrayRef pad, PadFuncOptions::mode_t mode, double value) { const auto mode_enum = [&] { if (std::holds_alternative<enumtype::kConstant>(mode)) { return at::padding_mode::constant; } else if (std::holds_alternative<enumtype::kReflect>(mode)) { return at::padding_mode::reflect; } else if (std::holds_alternative<enumtype::kReplicate>(mode)) { return at::padding_mode::replicate; } else if (std::holds_alternative<enumtype::kCircular>(mode)) { return at::padding_mode::circular; } TORCH_CHECK(false, "Unrecognised padding mode"); }(); std::optional<double> fill_value; if (value != 0.0) { fill_value = value; } return at::_pad_enum(input, pad, static_cast<int64_t>(mode_enum), fill_value); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.pad /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::PadFuncOptions` class to /// learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1, /// 2}).mode(torch::kReplicate)); /// ``` inline Tensor pad(const Tensor& input, const PadFuncOptions& options) { return detail::pad(input, options.pad(), options.mode(), options.value()); } } // namespace torch::nn::functional ```
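
The pad list is read two entries per dimension, innermost dimension first, as the header's own example hints. An illustrative sketch (not part of the header; assumes a linked libtorch):

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  auto x = torch::ones({1, 2, 3, 4});

  // Constant-pad the last dim by (1, 2) and the second-to-last by (3, 0):
  // the result has sizes {1, 2, 3 + 3 + 0, 4 + 1 + 2} == {1, 2, 6, 7}.
  auto y = F::pad(x, F::PadFuncOptions({1, 2, 3, 0}).value(7));

  // Reflection/replication modes pad only the spatial dims of 3D/4D/5D
  // input, and reflection padding must be smaller than the padded dim.
  auto r = F::pad(x, F::PadFuncOptions({1, 1, 1, 1}).mode(torch::kReflect));
}
```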
=============================================================================================================================================================== SOURCE CODE FILE: pixelshuffle.h LINES: 1 SIZE: 1.30 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\pixelshuffle.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/options/pixelshuffle.h> namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor pixel_shuffle(const Tensor& input, int64_t upscale_factor) { return torch::pixel_shuffle(input, upscale_factor); } inline Tensor pixel_unshuffle(const Tensor& input, int64_t downscale_factor) { return torch::pixel_unshuffle(input, downscale_factor); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.pixel_shuffle /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::PixelShuffleFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2)); /// ``` inline Tensor pixel_shuffle( const Tensor& input, const PixelShuffleFuncOptions& options) { return detail::pixel_shuffle(input, options.upscale_factor()); } inline Tensor pixel_unshuffle( const Tensor& input, const PixelUnshuffleFuncOptions& options) { return detail::pixel_unshuffle(input, options.downscale_factor()); } } // namespace torch::nn::functional ```
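
Since `pixel_shuffle` rearranges (N, C*r^2, H, W) into (N, C, H*r, W*r) and `pixel_unshuffle` inverts that permutation, the two round-trip exactly. An illustrative sketch (not part of the header; assumes a linked libtorch):

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  auto x = torch::randn({1, 9, 4, 4}); // C * r^2 == 9 with r == 3
  auto up = F::pixel_shuffle(x, F::PixelShuffleFuncOptions(3));
  // up.sizes() == {1, 1, 12, 12}
  auto back = F::pixel_unshuffle(up, F::PixelUnshuffleFuncOptions(3));
  // back equals x exactly: both ops only permute elements.
}
```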
========================================================================================================================================================== SOURCE CODE FILE: pooling.h LINES: 1 SIZE: 35.70 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\pooling.h ENCODING: utf-8
```h
#pragma once

#include <c10/util/irange.h>
#include <torch/nn/functional/activation.h>
#include <torch/nn/modules/utils.h>
#include <torch/nn/options/pooling.h>

namespace torch::nn::functional {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor avg_pool1d(
    const Tensor& input,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    ExpandingArray<1> padding,
    bool ceil_mode,
    bool count_include_pad) {
  return torch::avg_pool1d(
      input, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.avg_pool1d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::AvgPool1dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
/// ```
inline Tensor avg_pool1d(
    const Tensor& input,
    const AvgPool1dFuncOptions& options) {
  return detail::avg_pool1d(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.ceil_mode(),
      options.count_include_pad());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor avg_pool2d(
    const Tensor& input,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    ExpandingArray<2> padding,
    bool ceil_mode,
    bool count_include_pad,
    std::optional<int64_t> divisor_override) {
  return torch::avg_pool2d(
      input,
      kernel_size,
      stride,
      padding,
      ceil_mode,
      count_include_pad,
      divisor_override);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.avg_pool2d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::AvgPool2dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
/// ```
inline Tensor avg_pool2d(
    const Tensor& input,
    const AvgPool2dFuncOptions& options) {
  return detail::avg_pool2d(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.ceil_mode(),
      options.count_include_pad(),
      options.divisor_override());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor avg_pool3d(
    const Tensor& input,
    ExpandingArray<3> kernel_size,
    ExpandingArray<3> stride,
    ExpandingArray<3> padding,
    bool ceil_mode,
    bool count_include_pad,
    std::optional<int64_t> divisor_override) {
  return torch::avg_pool3d(
      input,
      kernel_size,
      stride,
      padding,
      ceil_mode,
      count_include_pad,
      divisor_override);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.avg_pool3d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::AvgPool3dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2)); /// ``` inline Tensor avg_pool3d( const Tensor& input, const AvgPool3dFuncOptions& options) { return detail::avg_pool3d( input, options.kernel_size(), options.stride(), options.padding(), options.ceil_mode(), options.count_include_pad(), options.divisor_override()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor max_pool1d( const Tensor& input, ExpandingArray<1> kernel_size, ExpandingArray<1> stride, ExpandingArray<1> padding, ExpandingArray<1> dilation, bool ceil_mode) { return torch::max_pool1d( input, kernel_size, stride, padding, dilation, ceil_mode); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_pool1d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2)); /// ``` inline Tensor max_pool1d( const Tensor& input, const MaxPool1dFuncOptions& options) { return detail::max_pool1d( input, options.kernel_size(), options.stride(), options.padding(), options.dilation(), options.ceil_mode()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline std::tuple<Tensor, Tensor> max_pool1d_with_indices( const Tensor& input, ExpandingArray<1> kernel_size, ExpandingArray<1> stride, ExpandingArray<1> padding, ExpandingArray<1> dilation, bool ceil_mode) { return torch::max_pool1d_with_indices( input, kernel_size, stride, padding, dilation, ceil_mode); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::max_pool1d_with_indices(x, F::MaxPool1dFuncOptions(3).stride(2)); /// ``` inline std::tuple<Tensor, Tensor> max_pool1d_with_indices( const Tensor& input, const MaxPool1dFuncOptions& options) { return detail::max_pool1d_with_indices( input, options.kernel_size(), options.stride(), options.padding(), options.dilation(), options.ceil_mode()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor max_pool2d( const Tensor& input, ExpandingArray<2> kernel_size, ExpandingArray<2> stride, ExpandingArray<2> padding, ExpandingArray<2> dilation, bool ceil_mode) { return torch::max_pool2d( input, kernel_size, stride, padding, dilation, ceil_mode); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_pool2d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions` /// class to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2)); /// ``` inline Tensor max_pool2d( const Tensor& input, const MaxPool2dFuncOptions& options) { return detail::max_pool2d( input, options.kernel_size(), options.stride(), options.padding(), options.dilation(), options.ceil_mode()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline std::tuple<Tensor, Tensor> max_pool2d_with_indices( const Tensor& input, ExpandingArray<2> kernel_size, ExpandingArray<2> stride, ExpandingArray<2> padding, ExpandingArray<2> dilation, bool ceil_mode) { return torch::max_pool2d_with_indices( input, kernel_size, stride, padding, dilation, ceil_mode); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(3).stride(2)); /// ``` inline std::tuple<Tensor, Tensor> max_pool2d_with_indices( const Tensor& input, const MaxPool2dFuncOptions& options) { return detail::max_pool2d_with_indices( input, options.kernel_size(), options.stride(), options.padding(), options.dilation(), options.ceil_mode()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor max_pool3d( const Tensor& input, ExpandingArray<3> kernel_size, ExpandingArray<3> stride, ExpandingArray<3> padding, ExpandingArray<3> dilation, bool ceil_mode) { return torch::max_pool3d( input, kernel_size, stride, padding, dilation, ceil_mode); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_pool3d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2)); /// ``` inline Tensor max_pool3d( const Tensor& input, const MaxPool3dFuncOptions& options) { return detail::max_pool3d( input, options.kernel_size(), options.stride(), options.padding(), options.dilation(), options.ceil_mode()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline std::tuple<Tensor, Tensor> max_pool3d_with_indices( const Tensor& input, ExpandingArray<3> kernel_size, ExpandingArray<3> stride, ExpandingArray<3> padding, ExpandingArray<3> dilation, bool ceil_mode) { return torch::max_pool3d_with_indices( input, kernel_size, stride, padding, dilation, ceil_mode); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions` /// class to learn what optional arguments are supported for this functional. 
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool3d_with_indices(x, F::MaxPool3dFuncOptions(3).stride(2));
/// ```
inline std::tuple<Tensor, Tensor> max_pool3d_with_indices(
    const Tensor& input,
    const MaxPool3dFuncOptions& options) {
  return detail::max_pool3d_with_indices(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.dilation(),
      options.ceil_mode());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> adaptive_max_pool1d_with_indices(
    const Tensor& input,
    ExpandingArray<1> output_size) {
  return torch::adaptive_max_pool1d(input, output_size);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for
/// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool1d_with_indices(x, F::AdaptiveMaxPool1dFuncOptions(3));
/// ```
inline std::tuple<Tensor, Tensor> adaptive_max_pool1d_with_indices(
    const Tensor& input,
    const AdaptiveMaxPool1dFuncOptions& options) {
  return detail::adaptive_max_pool1d_with_indices(input, options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor adaptive_max_pool1d(
    const Tensor& input,
    ExpandingArray<1> output_size) {
  return std::get<0>(adaptive_max_pool1d_with_indices(input, output_size));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_max_pool1d
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
/// ```
inline Tensor adaptive_max_pool1d(
    const Tensor& input,
    const AdaptiveMaxPool1dFuncOptions& options) {
  return detail::adaptive_max_pool1d(input, options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> adaptive_max_pool2d_with_indices(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<2> output_size) {
  auto output_size_ =
      torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
  return torch::adaptive_max_pool2d(input, output_size_);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for
/// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool2d_with_indices(x, F::AdaptiveMaxPool2dFuncOptions(3));
/// ```
inline std::tuple<Tensor, Tensor> adaptive_max_pool2d_with_indices(
    const Tensor& input,
    const AdaptiveMaxPool2dFuncOptions& options) {
  return detail::adaptive_max_pool2d_with_indices(input, options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor adaptive_max_pool2d(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<2> output_size) {
  return std::get<0>(adaptive_max_pool2d_with_indices(input, output_size));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_max_pool2d
/// about the exact behavior of this functional.
/// /// See the documentation for /// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3)); /// ``` inline Tensor adaptive_max_pool2d( const Tensor& input, const AdaptiveMaxPool2dFuncOptions& options) { return detail::adaptive_max_pool2d(input, options.output_size()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline std::tuple<Tensor, Tensor> adaptive_max_pool3d_with_indices( const Tensor& input, ExpandingArrayWithOptionalElem<3> output_size) { auto output_size_ = torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); return torch::adaptive_max_pool3d(input, output_size_); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See the documentation for /// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::adaptive_max_pool3d_with_indices(x, F::AdaptiveMaxPool3dFuncOptions(3)); /// ``` inline std::tuple<Tensor, Tensor> adaptive_max_pool3d_with_indices( const Tensor& input, const AdaptiveMaxPool3dFuncOptions& options) { return detail::adaptive_max_pool3d_with_indices(input, options.output_size()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor adaptive_max_pool3d( const Tensor& input, ExpandingArrayWithOptionalElem<3> output_size) { return std::get<0>(adaptive_max_pool3d_with_indices(input, output_size)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_max_pool3d /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3)); /// ``` inline Tensor adaptive_max_pool3d( const Tensor& input, const AdaptiveMaxPool3dFuncOptions& options) { return detail::adaptive_max_pool3d(input, options.output_size()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor adaptive_avg_pool1d( const Tensor& input, ExpandingArray<1> output_size) { return torch::adaptive_avg_pool1d(input, output_size); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_avg_pool1d /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::AdaptiveAvgPool1dFuncOptions` class to learn what /// optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3)); /// ``` inline Tensor adaptive_avg_pool1d( const Tensor& input, const AdaptiveAvgPool1dFuncOptions& options) { return detail::adaptive_avg_pool1d(input, options.output_size()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor adaptive_avg_pool2d( const Tensor& input, ExpandingArrayWithOptionalElem<2> output_size) { auto output_size_ = torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); return torch::adaptive_avg_pool2d(input, output_size_); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_avg_pool2d /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::AdaptiveAvgPool2dFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3)); /// ``` inline Tensor adaptive_avg_pool2d( const Tensor& input, const AdaptiveAvgPool2dFuncOptions& options) { return detail::adaptive_avg_pool2d(input, options.output_size()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor adaptive_avg_pool3d( const Tensor& input, ExpandingArrayWithOptionalElem<3> output_size) { auto output_size_ = torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); return torch::adaptive_avg_pool3d(input, output_size_); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_avg_pool3d /// about the exact behavior of this functional. /// /// See the documentation for /// `torch::nn::functional::AdaptiveAvgPool3dFuncOptions` class to learn what /// optional arguments are supported for this functional. 
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
/// ```
inline Tensor adaptive_avg_pool3d(
    const Tensor& input,
    const AdaptiveAvgPool3dFuncOptions& options) {
  return detail::adaptive_avg_pool3d(input, options.output_size());
}

// ============================================================================

inline std::vector<int64_t> _unpool_output_size(
    const Tensor& input,
    const IntArrayRef& kernel_size,
    const IntArrayRef& stride,
    const IntArrayRef& padding,
    const std::optional<std::vector<int64_t>>& output_size) {
  auto input_size = input.sizes();
  std::vector<int64_t> default_size;
  for (const auto d : c10::irange(kernel_size.size())) {
    default_size.push_back(
        (input_size[input_size.size() - kernel_size.size() + d] - 1) *
            stride[d] +
        kernel_size[d] - 2 * padding[d]);
  }
  if (!output_size) {
    return default_size;
  } else {
    // Accept either kernel_size.size() elements or the full input shape
    // (whose leading batch and channel dims are sliced off here).
    std::vector<int64_t> output_size_ = *output_size;
    if (output_size_.size() == kernel_size.size() + 2) {
      output_size_ = IntArrayRef(output_size_).slice(2).vec();
    }
    if (output_size_.size() != kernel_size.size()) {
      TORCH_CHECK(
          false,
          "output_size should be a sequence containing ",
          kernel_size.size(),
          " or ",
          kernel_size.size() + 2,
          " elements, but it has a length of '",
          output_size_.size(),
          "'");
    }
    for (const auto d : c10::irange(kernel_size.size())) {
      const auto min_size = default_size[d] - stride[d];
      const auto max_size = default_size[d] + stride[d];
      if (!(min_size <= output_size_[d] && output_size_[d] <= max_size)) {
        TORCH_CHECK(
            false,
            "invalid output_size ",
            output_size_,
            " (dim ",
            d,
            " must be between ",
            min_size,
            " and ",
            max_size,
            ")");
      }
    }
    return output_size_;
  }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor max_unpool1d(
    const Tensor& input,
    const Tensor& indices,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    ExpandingArray<1> padding,
    const std::optional<std::vector<int64_t>>& output_size) {
  auto output_size_ =
      _unpool_output_size(input, kernel_size, stride, padding, output_size);
  output_size_.push_back(1);
  return torch::max_unpool2d(
             input.unsqueeze(-1), indices.unsqueeze(-1), output_size_)
      .squeeze(-1);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_unpool1d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::MaxUnpool1dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::max_unpool1d(x, indices, /// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1)); /// ``` inline Tensor max_unpool1d( const Tensor& input, const Tensor& indices, const MaxUnpool1dFuncOptions& options) { return detail::max_unpool1d( input, indices, options.kernel_size(), options.stride(), options.padding(), options.output_size()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor max_unpool2d( const Tensor& input, const Tensor& indices, ExpandingArray<2> kernel_size, ExpandingArray<2> stride, ExpandingArray<2> padding, const std::optional<std::vector<int64_t>>& output_size) { auto output_size_ = _unpool_output_size(input, kernel_size, stride, padding, output_size); return torch::max_unpool2d(input, indices, output_size_); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_unpool2d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::MaxUnpool2dFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::max_unpool2d(x, indices, /// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1)); /// ``` inline Tensor max_unpool2d( const Tensor& input, const Tensor& indices, const MaxUnpool2dFuncOptions& options) { return detail::max_unpool2d( input, indices, options.kernel_size(), options.stride(), options.padding(), options.output_size()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor max_unpool3d( const Tensor& input, const Tensor& indices, ExpandingArray<3> kernel_size, ExpandingArray<3> stride, ExpandingArray<3> padding, const std::optional<std::vector<int64_t>>& output_size) { auto output_size_ = _unpool_output_size(input, kernel_size, stride, padding, output_size); return torch::max_unpool3d(input, indices, output_size_, stride, padding); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_unpool3d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::MaxUnpool3dFuncOptions` /// class to learn what optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3)); /// ``` inline Tensor max_unpool3d( const Tensor& input, const Tensor& indices, const MaxUnpool3dFuncOptions& options) { return detail::max_unpool3d( input, indices, options.kernel_size(), options.stride(), options.padding(), options.output_size()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices( const Tensor& input, const ExpandingArray<2>& kernel_size, const std::optional<ExpandingArray<2>>& output_size, const std::optional<ExpandingArray<2, double>>& output_ratio, const Tensor& _random_samples) { if (output_size == std::nullopt && output_ratio == std::nullopt) { TORCH_CHECK( false, "fractional_max_pool2d requires specifying either ", "an output_size or an output_ratio"); } std::optional<ExpandingArray<2>> output_size_ = output_size; if (output_size_ == std::nullopt) { TORCH_INTERNAL_ASSERT(output_ratio != std::nullopt); output_size_ = { (int64_t)(static_cast<double>(input.size(-2)) * (*output_ratio.value())[0]), (int64_t)(static_cast<double>(input.size(-1)) * (*output_ratio.value())[1])}; } Tensor _random_samples_ = _random_samples; if (!_random_samples_.defined()) { auto n_batch = input.dim() == 3 ? 1 : input.size(0); _random_samples_ = torch::rand( {n_batch, input.size(-3), 2}, torch::TensorOptions().dtype(input.dtype()).device(input.device())); } return torch::fractional_max_pool2d( input, kernel_size, *output_size_, _random_samples_); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See the documentation for /// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::fractional_max_pool2d_with_indices(x, /// F::FractionalMaxPool2dFuncOptions(3).output_size(2)); /// ``` inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices( const Tensor& input, const FractionalMaxPool2dFuncOptions& options) { return detail::fractional_max_pool2d_with_indices( input, options.kernel_size(), options.output_size(), options.output_ratio(), options._random_samples()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor fractional_max_pool2d( const Tensor& input, ExpandingArray<2> kernel_size, std::optional<ExpandingArray<2>> output_size, std::optional<ExpandingArray<2, double>> output_ratio, const Tensor& _random_samples) { return std::get<0>(fractional_max_pool2d_with_indices( input, kernel_size, output_size, output_ratio, _random_samples)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See the documentation for /// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what /// optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::fractional_max_pool2d(x, /// F::FractionalMaxPool2dFuncOptions(3).output_size(2)); /// ``` inline Tensor fractional_max_pool2d( const Tensor& input, const FractionalMaxPool2dFuncOptions& options) { return detail::fractional_max_pool2d( input, options.kernel_size(), options.output_size(), options.output_ratio(), options._random_samples()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices( const Tensor& input, const ExpandingArray<3>& kernel_size, const std::optional<ExpandingArray<3>>& output_size, const std::optional<ExpandingArray<3, double>>& output_ratio, const Tensor& _random_samples) { if (output_size == std::nullopt && output_ratio == std::nullopt) { TORCH_CHECK( false, "fractional_max_pool3d requires specifying either ", "an output_size or an output_ratio"); } std::optional<ExpandingArray<3>> output_size_ = output_size; if (output_size_ == std::nullopt) { TORCH_INTERNAL_ASSERT(output_ratio != std::nullopt); output_size_ = { (int64_t)(static_cast<double>(input.size(-3)) * (*output_ratio.value())[0]), (int64_t)(static_cast<double>(input.size(-2)) * (*output_ratio.value())[1]), (int64_t)(static_cast<double>(input.size(-1)) * (*output_ratio.value())[2])}; } Tensor _random_samples_ = _random_samples; if (!_random_samples_.defined()) { auto n_batch = input.dim() == 4 ? 1 : input.size(0); _random_samples_ = torch::rand( {n_batch, input.size(-4), 3}, torch::TensorOptions().dtype(input.dtype()).device(input.device())); } return torch::fractional_max_pool3d( input, kernel_size, *output_size_, _random_samples_); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See the documentation for /// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what /// optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::fractional_max_pool3d_with_indices(x, /// F::FractionalMaxPool3dFuncOptions(3).output_size(2)); /// ``` inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices( const Tensor& input, const FractionalMaxPool3dFuncOptions& options) { return detail::fractional_max_pool3d_with_indices( input, options.kernel_size(), options.output_size(), options.output_ratio(), options._random_samples()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor fractional_max_pool3d( const Tensor& input, ExpandingArray<3> kernel_size, std::optional<ExpandingArray<3>> output_size, std::optional<ExpandingArray<3, double>> output_ratio, const Tensor& _random_samples) { return std::get<0>(fractional_max_pool3d_with_indices( input, kernel_size, output_size, output_ratio, _random_samples)); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See the documentation for /// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what /// optional arguments are supported for this functional. 
/// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::fractional_max_pool3d(x, /// F::FractionalMaxPool3dFuncOptions(3).output_size(2)); /// ``` inline Tensor fractional_max_pool3d( const Tensor& input, const FractionalMaxPool3dFuncOptions& options) { return detail::fractional_max_pool3d( input, options.kernel_size(), options.output_size(), options.output_ratio(), options._random_samples()); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor lp_pool1d( const Tensor& input, double norm_type, ExpandingArray<1> kernel_size, ExpandingArray<1> stride, bool ceil_mode) { Tensor out = detail::avg_pool1d( input.pow(norm_type), kernel_size, stride, /*padding=*/0, ceil_mode, /*count_include_pad=*/true); return (torch::sign(out) * relu(torch::abs(out))) .mul((*kernel_size)[0]) .pow(1. / norm_type); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.lp_pool1d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::LPPool1dFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2)); /// ``` inline Tensor lp_pool1d( const Tensor& input, const LPPool1dFuncOptions& options) { return detail::lp_pool1d( input, options.norm_type(), options.kernel_size(), options.stride(), options.ceil_mode()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor lp_pool2d( const Tensor& input, double norm_type, ExpandingArray<2> kernel_size, ExpandingArray<2> stride, bool ceil_mode) { auto kw = (*kernel_size)[0]; auto kh = (*kernel_size)[1]; Tensor out = detail::avg_pool2d( input.pow(norm_type), kernel_size, stride, /*padding=*/0, ceil_mode, /*count_include_pad=*/true, /*divisor_override=*/std::nullopt); return (torch::sign(out) * relu(torch::abs(out))) .mul(kw * kh) .pow(1. / norm_type); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.lp_pool2d /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::LPPool2dFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2)); /// ``` inline Tensor lp_pool2d( const Tensor& input, const LPPool2dFuncOptions& options) { return detail::lp_pool2d( input, options.norm_type(), options.kernel_size(), options.stride(), options.ceil_mode()); } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor lp_pool3d( const Tensor& input, double norm_type, ExpandingArray<3> kernel_size, ExpandingArray<3> stride, bool ceil_mode) { auto kd = (*kernel_size)[0]; auto kw = (*kernel_size)[1]; auto kh = (*kernel_size)[2]; Tensor out = detail::avg_pool3d( input.pow(norm_type), kernel_size, stride, /*padding=*/0, ceil_mode, /*count_include_pad=*/true, /*divisor_override=*/std::nullopt); return (torch::sign(out) * relu(torch::abs(out))) .mul(kd * kw * kh) .pow(1. / norm_type); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.lp_pool3d /// about the exact behavior of this functional. 
/// /// See the documentation for `torch::nn::functional::LPPool3dFuncOptions` class /// to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::lp_pool3d(x, F::LPPool3dFuncOptions(3, {3, 3, 5}).stride(3)); /// ``` inline Tensor lp_pool3d( const Tensor& input, const LPPool3dFuncOptions& options) { return detail::lp_pool3d( input, options.norm_type(), options.kernel_size(), options.stride(), options.ceil_mode()); } } // namespace torch::nn::functional ```
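
A sketch of a few of the pooling functionals above, including the max-pool/unpool round trip via the returned indices; illustrative only (not part of the header), assuming a linked libtorch:

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  auto x = torch::randn({1, 3, 16, 16});

  auto avg = F::avg_pool2d(x, F::AvgPool2dFuncOptions(2).stride(2));

  // The *_with_indices variants return the pooled values plus the indices
  // that the matching max_unpool* functional needs.
  auto [pooled, indices] =
      F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(2).stride(2));
  auto restored = F::max_unpool2d(
      pooled, indices, F::MaxUnpool2dFuncOptions(2).stride(2));
  // restored has x's shape, with zeros at non-maximal positions.

  // Adaptive pooling fixes the output size instead of the kernel size.
  auto gap = F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(1));
  // gap.sizes() == {1, 3, 1, 1}
}
```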
============================================================================================================================================================= SOURCE CODE FILE: upsampling.h LINES: 1 SIZE: 10.76 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\upsampling.h ENCODING: utf-8 ```h #pragma once #include <c10/util/irange.h> #include <torch/nn/functional/pooling.h> #include <torch/nn/options/upsampling.h> #include <cmath> #include <utility> namespace torch::nn::functional { inline std::vector<int64_t> _interp_output_size( int64_t dim, std::tuple< Tensor, std::optional<std::vector<int64_t>>, std::optional<std::vector<double>>, std::optional<bool>> closed_over_args) { auto [input, size, scale_factor, recompute_scale_factor] = std::move(closed_over_args); if (size == std::nullopt && scale_factor == std::nullopt) { TORCH_CHECK(false, "either size or scale_factor should be defined"); } if (size != std::nullopt && scale_factor != std::nullopt) { TORCH_CHECK(false, "only one of size or scale_factor should be defined"); } if (scale_factor != std::nullopt) { if (static_cast<int64_t>(scale_factor.value().size()) != dim) { TORCH_CHECK( false, "scale_factor shape must match input shape. ", "Input is ", dim, "D, scale_factor size is ", torch::ArrayRef<double>(*scale_factor)); } } if (size != std::nullopt) { return *size; } TORCH_INTERNAL_ASSERT(scale_factor != std::nullopt); auto scale_factors = *scale_factor; if (recompute_scale_factor == std::nullopt) { // only warn when the scales have floating values since // the result for ints is the same with/without recompute_scale_factor bool is_float_scale_factor = false; for (double scale : scale_factors) { is_float_scale_factor = floor(scale) != scale; if (is_float_scale_factor) { break; } } if (is_float_scale_factor) { TORCH_WARN( "The default behavior for interpolate/upsample with float scale_factor changed " "in 1.6.0 to align with other frameworks/libraries, and uses scale_factor directly, " "instead of relying on the computed output size. " "If you wish to keep the old behavior, please set recompute_scale_factor=True. " "See the documentation of nn.Upsample for details. "); } } std::vector<int64_t> ret; for (const auto i : c10::irange(dim)) { ret.emplace_back(static_cast<int64_t>( floor(static_cast<double>(input.size(i + 2)) * scale_factors[i]))); } return ret; } #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor interpolate( const Tensor& input, const std::optional<std::vector<int64_t>>& size, const std::optional<std::vector<double>>& scale_factor, InterpolateFuncOptions::mode_t mode, std::optional<bool> align_corners, std::optional<bool> recompute_scale_factor, bool antialias) { if (std::holds_alternative<enumtype::kNearest>(mode) || std::get_if<enumtype::kArea>(&mode)) { if (align_corners != std::nullopt) { TORCH_CHECK( false, "align_corners option can only be set with the " "interpolating modes: linear | bilinear | bicubic | trilinear"); } } else { if (align_corners == std::nullopt) { TORCH_WARN( "Default upsampling behavior when mode=", enumtype::get_enum_name(mode), " is changed " "to align_corners=False since 0.4.0. Please specify " "align_corners=True if the old behavior is desired. 
" "See the documentation of nn.Upsample for details."); align_corners = false; } } TORCH_CHECK( input.dim() >= 3 && input.dim() <= 5, "Input Error: Only 3D, 4D and 5D input Tensors supported " "(got ", input.dim(), "D) for the modes: nearest | linear | bilinear | bicubic | trilinear " "(got ", enumtype::get_enum_name(mode), ")"); auto scale_factor_len = input.dim() - 2; std::vector<std::optional<double>> scale_factor_list( scale_factor_len, std::nullopt); if (scale_factor != std::nullopt && !recompute_scale_factor.value_or(false)) { auto _scale_factor_repeated = *scale_factor; scale_factor_list = {}; for (const auto& elem : _scale_factor_repeated) { scale_factor_list.emplace_back(elem); } } if (antialias && !(input.dim() == 4 && (std::get_if<enumtype::kBilinear>(&mode) || std::get_if<enumtype::kBicubic>(&mode)))) { TORCH_CHECK( false, "Anti-alias option is only supported for bilinear and bicubic modes"); } auto closed_over_args = std::make_tuple(input, size, scale_factor, recompute_scale_factor); if (input.dim() == 3 && std::get_if<enumtype::kNearest>(&mode)) { return torch::upsample_nearest1d( input, _interp_output_size(1, std::move(closed_over_args)), scale_factor_list.at(0)); } else if (input.dim() == 4 && std::get_if<enumtype::kNearest>(&mode)) { return torch::upsample_nearest2d( input, _interp_output_size(2, std::move(closed_over_args)), scale_factor_list.at(0), scale_factor_list.at(1)); } else if (input.dim() == 5 && std::get_if<enumtype::kNearest>(&mode)) { return torch::upsample_nearest3d( input, _interp_output_size(3, std::move(closed_over_args)), scale_factor_list.at(0), scale_factor_list.at(1), scale_factor_list.at(2)); } else if (input.dim() == 3 && std::get_if<enumtype::kNearestExact>(&mode)) { return torch::_upsample_nearest_exact1d( input, _interp_output_size(1, std::move(closed_over_args)), scale_factor_list.at(0)); } else if (input.dim() == 4 && std::get_if<enumtype::kNearestExact>(&mode)) { return torch::_upsample_nearest_exact2d( input, _interp_output_size(2, std::move(closed_over_args)), scale_factor_list.at(0), scale_factor_list.at(1)); } else if (input.dim() == 5 && std::get_if<enumtype::kNearestExact>(&mode)) { return torch::_upsample_nearest_exact3d( input, _interp_output_size(3, std::move(closed_over_args)), scale_factor_list.at(0), scale_factor_list.at(1), scale_factor_list.at(2)); } else if (input.dim() == 3 && std::get_if<enumtype::kArea>(&mode)) { return detail::adaptive_avg_pool1d( input, _interp_output_size(1, std::move(closed_over_args))); } else if (input.dim() == 4 && std::get_if<enumtype::kArea>(&mode)) { return detail::adaptive_avg_pool2d( input, _interp_output_size(2, std::move(closed_over_args))); } else if (input.dim() == 5 && std::get_if<enumtype::kArea>(&mode)) { return detail::adaptive_avg_pool3d( input, _interp_output_size(3, std::move(closed_over_args))); } else if (input.dim() == 3 && std::get_if<enumtype::kLinear>(&mode)) { TORCH_CHECK( align_corners != std::nullopt, "align_corners should be specified."); return torch::upsample_linear1d( input, _interp_output_size(1, std::move(closed_over_args)), *align_corners, scale_factor_list.at(0)); } else if (input.dim() == 3 && std::get_if<enumtype::kBilinear>(&mode)) { TORCH_CHECK(false, "Got 3D input, but bilinear mode needs 4D input"); } else if (input.dim() == 3 && std::get_if<enumtype::kTrilinear>(&mode)) { TORCH_CHECK(false, "Got 3D input, but trilinear mode needs 5D input"); } else if (input.dim() == 4 && std::get_if<enumtype::kLinear>(&mode)) { TORCH_CHECK(false, "Got 4D input, but linear mode 
needs 3D input"); } else if (input.dim() == 4 && std::get_if<enumtype::kBilinear>(&mode)) { TORCH_CHECK( align_corners != std::nullopt, "align_corners should be specified."); if (antialias) { return torch::_upsample_bilinear2d_aa( input, _interp_output_size(2, std::move(closed_over_args)), *align_corners, scale_factor_list.at(0), scale_factor_list.at(1)); } return torch::upsample_bilinear2d( input, _interp_output_size(2, std::move(closed_over_args)), *align_corners, scale_factor_list.at(0), scale_factor_list.at(1)); } else if (input.dim() == 4 && std::get_if<enumtype::kTrilinear>(&mode)) { TORCH_CHECK(false, "Got 4D input, but trilinear mode needs 5D input"); } else if (input.dim() == 5 && std::get_if<enumtype::kLinear>(&mode)) { TORCH_CHECK(false, "Got 5D input, but linear mode needs 3D input"); } else if (input.dim() == 5 && std::get_if<enumtype::kBilinear>(&mode)) { TORCH_CHECK(false, "Got 5D input, but bilinear mode needs 4D input"); } else if (input.dim() == 5 && std::get_if<enumtype::kTrilinear>(&mode)) { TORCH_CHECK( align_corners != std::nullopt, "align_corners should be specified."); return torch::upsample_trilinear3d( input, _interp_output_size(3, std::move(closed_over_args)), *align_corners, scale_factor_list.at(0), scale_factor_list.at(1), scale_factor_list.at(2)); } else if (input.dim() == 4 && std::get_if<enumtype::kBicubic>(&mode)) { TORCH_CHECK( align_corners != std::nullopt, "align_corners should be specified."); if (antialias) { return torch::_upsample_bicubic2d_aa( input, _interp_output_size(2, std::move(closed_over_args)), *align_corners, scale_factor_list.at(0), scale_factor_list.at(1)); } return torch::upsample_bicubic2d( input, _interp_output_size(2, std::move(closed_over_args)), *align_corners, scale_factor_list.at(0), scale_factor_list.at(1)); } else { TORCH_CHECK( false, "Input Error: Only 3D, 4D and 5D input Tensors supported " "(got ", input.dim(), "D) for the modes: nearest | linear | bilinear | bicubic | trilinear " "(got ", enumtype::get_enum_name(mode), ")"); } } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.interpolate /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::InterpolateFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::interpolate(input, /// F::InterpolateFuncOptions().size({4}).mode(torch::kNearest)); /// ``` inline Tensor interpolate( const Tensor& input, const InterpolateFuncOptions& options = {}) { return detail::interpolate( input, options.size(), options.scale_factor(), options.mode(), options.align_corners(), options.recompute_scale_factor(), options.antialias()); } } // namespace torch::nn::functional ```
========================================================================================================================================================= SOURCE CODE FILE: vision.h LINES: 1 SIZE: 3.61 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\functional\vision.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/options/vision.h> #include <torch/types.h> namespace torch::nn::functional { inline Tensor affine_grid( const Tensor& theta, const IntArrayRef& size, bool align_corners = false) { // enforce floating point dtype on theta TORCH_CHECK( theta.is_floating_point(), "Expected theta to have floating point type, but got ", theta.dtype()); // check that shapes and sizes match if (size.size() == 4) { TORCH_CHECK( theta.dim() == 3 && theta.size(-2) == 2 && theta.size(-1) == 3, "Expected a batch of 2D affine matrices of shape Nx2x3 for size ", size, ". Got ", theta.sizes(), "."); } else if (size.size() == 5) { TORCH_CHECK( theta.dim() == 3 && theta.size(-2) == 3 && theta.size(-1) == 4, "Expected a batch of 3D affine matrices of shape Nx3x4 for size ", size, ". Got ", theta.sizes(), "."); } else { TORCH_CHECK( false, "affine_grid only supports 4D and 5D sizes, ", "for 2D and 3D affine transforms, respectively. ", "Got size ", size); } if (*std::min_element(size.begin(), size.end()) <= 0) { TORCH_CHECK(false, "Expected non-zero, positive output size. Got ", size); } return torch::affine_grid_generator(theta, size, align_corners); } // ============================================================================ #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { inline Tensor grid_sample( const Tensor& input, const Tensor& grid, GridSampleFuncOptions::mode_t mode, GridSampleFuncOptions::padding_mode_t padding_mode, std::optional<bool> align_corners) { int64_t mode_enum = 0, padding_mode_enum = 0; if (std::holds_alternative<enumtype::kBilinear>(mode)) { mode_enum = 0; } else if (std::holds_alternative<enumtype::kNearest>(mode)) { mode_enum = 1; } else { /// mode == 'bicubic' mode_enum = 2; } if (std::holds_alternative<enumtype::kZeros>(padding_mode)) { padding_mode_enum = 0; } else if (std::holds_alternative<enumtype::kBorder>(padding_mode)) { padding_mode_enum = 1; } else { /// padding_mode == 'reflection' padding_mode_enum = 2; } if (!align_corners.has_value()) { TORCH_WARN( "Default grid_sample and affine_grid behavior has changed ", "to align_corners=False since 1.3.0. Please specify ", "align_corners=True if the old behavior is desired. ", "See the documentation of grid_sample for details."); align_corners = false; } return torch::grid_sampler( input, grid, mode_enum, padding_mode_enum, align_corners.value()); } } // namespace detail #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /// See /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.grid_sample /// about the exact behavior of this functional. /// /// See the documentation for `torch::nn::functional::GridSampleFuncOptions` /// class to learn what optional arguments are supported for this functional. /// /// Example: /// ``` /// namespace F = torch::nn::functional; /// F::grid_sample(input, grid, /// F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true)); /// ``` inline Tensor grid_sample( const Tensor& input, const Tensor& grid, const GridSampleFuncOptions& options = {}) { return detail::grid_sample( input, grid, options.mode(), options.padding_mode(), options.align_corners()); } } // namespace torch::nn::functional ```
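The two functionals above compose naturally: `affine_grid` builds a sampling grid from a batch of affine matrices, and `grid_sample` samples the input at those grid locations. A sketch with an identity transform (all shapes are illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

namespace F = torch::nn::functional;

int main() {
  // Batch of one 2D identity affine matrix: theta has shape N x 2 x 3.
  auto theta = torch::tensor(
      {{{1.0f, 0.0f, 0.0f}, {0.0f, 1.0f, 0.0f}}});
  auto input = torch::rand({1, 3, 8, 8});

  // A 4D size produces an (N, H, W, 2) grid of sampling coordinates.
  auto grid = F::affine_grid(theta, {1, 3, 8, 8}, /*align_corners=*/false);

  // With an identity theta, sampling reproduces the input (up to interpolation).
  auto out = F::grid_sample(
      input,
      grid,
      F::GridSampleFuncOptions()
          .mode(torch::kBilinear)
          .padding_mode(torch::kZeros)
          .align_corners(false));
  std::cout << out.sizes() << '\n';  // [1, 3, 8, 8]
}
```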
============================================================================================================================================ SOURCE CODE FILE: init.h LINES: 1 SIZE: 4.92 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\init.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <torch/enum.h> #include <torch/types.h> namespace torch { namespace nn::init { using NonlinearityType = std::variant< enumtype::kLinear, enumtype::kConv1D, enumtype::kConv2D, enumtype::kConv3D, enumtype::kConvTranspose1D, enumtype::kConvTranspose2D, enumtype::kConvTranspose3D, enumtype::kSigmoid, enumtype::kTanh, enumtype::kReLU, enumtype::kLeakyReLU>; using FanModeType = std::variant<enumtype::kFanIn, enumtype::kFanOut>; } // namespace nn::init namespace nn::init { /// Return the recommended gain value for the given nonlinearity function. TORCH_API double calculate_gain( NonlinearityType nonlinearity, double param = 0.01); /// Fills the given `tensor` with the provided `value` in-place, and returns it. /// No gradient will be recorded for this operation. TORCH_API Tensor constant_(Tensor tensor, Scalar value); /// Fills the given `tensor` with the Dirac delta function in-place, and returns /// it. No gradient will be recorded for this operation. TORCH_API Tensor dirac_(Tensor tensor); /// Fills the given 2-dimensional `matrix` with an identity matrix. /// No gradient will be recorded for this operation. TORCH_API Tensor eye_(Tensor matrix); /// Fills the given 2-dimensional `matrix` with values drawn from a normal /// distribution parameterized by `mean` and `std`. /// No gradient will be recorded for this operation. TORCH_API Tensor normal_(Tensor tensor, double mean = 0, double std = 1); /// Fills the given `tensor` with ones. /// No gradient will be recorded for this operation. TORCH_API Tensor ones_(Tensor tensor); /// Fills the input `Tensor` with a (semi) orthogonal matrix, as described in /// "Exact solutions to the nonlinear dynamics of learning in deep linear neural /// networks" - Saxe, A. et al. (2013). The input tensor must have at least 2 /// dimensions, and for tensors with more than 2 dimensions the trailing /// dimensions are flattened. /// No gradient will be recorded for this operation. TORCH_API Tensor orthogonal_(Tensor tensor, double gain = 1.0); /// Fills the 2D input `Tensor` as a sparse matrix, where the /// non-zero elements will be drawn from a centered normal distribution /// with the given standard deviation `std`, as described in "Deep learning via /// Hessian-free optimization" - Martens, J. (2010). The `sparsity` is a real /// value between 0 and 1 that controls the fraction of elements in each column /// to be set to zero. /// No gradient will be recorded for this operation. TORCH_API Tensor sparse_(Tensor tensor, double sparsity, double std = 0.01); /// Fills the given 2-dimensional `matrix` with values drawn from a uniform /// distribution parameterized by `low` and `high`. /// No gradient will be recorded for this operation. TORCH_API Tensor uniform_(Tensor tensor, double low = 0, double high = 1); /// Fills the input `Tensor` with values according to the method /// described in "Delving deep into rectifiers: Surpassing human-level /// performance on ImageNet classification" - He, K. et al. (2015), using a /// normal distribution. Also known as He initialization. /// No gradient will be recorded for this operation. 
TORCH_API Tensor kaiming_normal_(
    Tensor tensor,
    double a = 0,
    FanModeType mode = torch::kFanIn,
    NonlinearityType nonlinearity = torch::kLeakyReLU);

/// Fills the input `Tensor` with values according to the method
/// described in "Delving deep into rectifiers: Surpassing human-level
/// performance on ImageNet classification" - He, K. et al. (2015), using a
/// uniform distribution. Also known as He initialization.
/// No gradient will be recorded for this operation.
TORCH_API Tensor kaiming_uniform_(
    Tensor tensor,
    double a = 0,
    FanModeType mode = torch::kFanIn,
    NonlinearityType nonlinearity = torch::kLeakyReLU);

/// Fills the input `Tensor` with values according to the method
/// described in "Understanding the difficulty of training deep feedforward
/// neural networks" - Glorot, X. & Bengio, Y. (2010). Values are scaled by the
/// `gain` parameter. No gradient will be recorded for this operation.
TORCH_API Tensor xavier_normal_(Tensor tensor, double gain = 1.0);

/// Fills the input `Tensor` with values according to the method
/// described in "Understanding the difficulty of training deep feedforward
/// neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform
/// distribution. Values are scaled by the `gain` parameter.
/// No gradient will be recorded for this operation.
TORCH_API Tensor xavier_uniform_(Tensor tensor, double gain = 1.0);

/// Fills the given `tensor` with zeros.
/// No gradient will be recorded for this operation.
TORCH_API Tensor zeros_(Tensor tensor);

TORCH_API std::tuple<int64_t, int64_t> _calculate_fan_in_and_fan_out(
    const Tensor& tensor);

} // namespace nn::init
} // namespace torch
```
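A usage sketch for the initializers above; as documented, each fills its argument in place, records no gradient, and returns the tensor. The layer sizes are arbitrary:

```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear l1(128, 64), l2(64, 10);

  // He initialization for a ReLU network, fan-in mode.
  torch::nn::init::kaiming_uniform_(
      l1->weight, /*a=*/0, torch::kFanIn, torch::kReLU);
  torch::nn::init::constant_(l1->bias, 0.0);

  // calculate_gain() returns the recommended gain for a nonlinearity,
  // which can feed the Xavier/Glorot initializers.
  double gain = torch::nn::init::calculate_gain(torch::kTanh);
  torch::nn::init::xavier_uniform_(l2->weight, gain);
  torch::nn::init::zeros_(l2->bias);
}
```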
============================================================================================================================================== SOURCE CODE FILE: module.h LINES: 1 SIZE: 26.87 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\module.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/modules/container/any_module_holder.h> #include <torch/nn/modules/container/any_value.h> #include <torch/nn/pimpl.h> #include <torch/ordered_dict.h> #include <torch/serialize/archive.h> #include <torch/types.h> #include <ATen/ATen.h> #include <functional> #include <iosfwd> #include <map> #include <memory> #include <string> #include <type_traits> namespace torch::nn { /// The base class for all modules in PyTorch. /// /// \rst /// .. note:: /// The design and implementation of this class is largely based on the Python /// API. You may want to consult the python documentation for /// :py:class:`pytorch:torch.nn.Module` for further clarification on certain /// methods or behavior. /// \endrst /// /// A `Module` is an abstraction over the implementation of some function or /// algorithm, possibly associated with some persistent data. A `Module` may /// contain further `Module`s ("submodules"), each with their own /// implementation, persistent data and further submodules. `Module`s can thus /// be said to form a recursive tree structure. A `Module` is registered as a /// submodule to another `Module` by calling `register_module()`, typically from /// within a parent module's constructor. /// /// A distinction is made between three kinds of persistent data that may be /// associated with a `Module`: /// /// 1. *Parameters*: tensors that record gradients, typically weights updated /// during the backward step (e.g. the `weight` of a `Linear` module), /// 2. *Buffers*: tensors that do not record gradients, typically updated during /// the forward step, such as running statistics (e.g. `mean` and `variance` /// in the `BatchNorm` module), /// 3. Any additional state, not necessarily tensors, required for the /// implementation or configuration of a `Module`. /// /// The first two kinds of state are special in that they may be registered /// with the `Module` system to allow convenient access and batch configuration. /// For example, registered parameters in any `Module` may be iterated over via /// the `parameters()` accessor. Further, changing the data type of a `Module`'s /// registered parameters can be done conveniently via `Module::to()`, e.g. /// `module->to(torch::kCUDA)` to move all parameters to GPU memory. Lastly, /// registered parameters and buffers are handled specially during a `clone()` /// operation, which performs a deepcopy of a cloneable `Module` hierarchy. /// /// Parameters are registered with a `Module` via `register_parameter`. Buffers /// are registered separately via `register_buffer`. These methods are part of /// the public API of `Module` and are typically invoked from within a /// concrete `Module`s constructor. 
class TORCH_API Module : public std::enable_shared_from_this<Module> { public: using ModuleApplyFunction = std::function<void(Module&)>; using ConstModuleApplyFunction = std::function<void(const Module&)>; using NamedModuleApplyFunction = std::function<void(const std::string&, Module&)>; using ConstNamedModuleApplyFunction = std::function<void(const std::string&, const Module&)>; using ModulePointerApplyFunction = std::function<void(const std::shared_ptr<Module>&)>; using NamedModulePointerApplyFunction = std::function<void(const std::string&, const std::shared_ptr<Module>&)>; /// Tells the base `Module` about the name of the submodule. explicit Module(std::string name); /// Constructs the module without immediate knowledge of the submodule's name. /// The name of the submodule is inferred via RTTI (if possible) the first /// time `.name()` is invoked. Module(); Module(const Module&) = default; Module& operator=(const Module&) = default; Module(Module&&) noexcept = default; Module& operator=(Module&&) noexcept = default; virtual ~Module() = default; /// Returns the name of the `Module`. /// /// A `Module` has an associated `name`, which is a string representation of /// the kind of concrete `Module` it represents, such as `"Linear"` for the /// `Linear` module. Under most circumstances, this name is automatically /// inferred via runtime type information (RTTI). In the unusual circumstance /// that you have this feature disabled, you may want to manually name your /// `Module`s by passing the string name to the `Module` base class' /// constructor. const std::string& name() const noexcept; /// Performs a recursive deep copy of the module and all its registered /// parameters, buffers and submodules. /// /// Optionally, this method sets the current device /// to the one supplied before cloning. If no device is given, each /// parameter and buffer will be moved to the device of its source. /// /// \rst /// .. attention:: /// Attempting to call the `clone()` method inherited from the base `Module` /// class (the one documented here) will fail. To inherit an actual /// implementation of `clone()`, you must subclass `Cloneable`. `Cloneable` /// is templatized on the concrete module type, and can thus properly copy a /// `Module`. This method is provided on the base class' API solely for an /// easier-to-use polymorphic interface. /// \endrst virtual std::shared_ptr<Module> clone( const std::optional<Device>& device = std::nullopt) const; /// Applies the `function` to the `Module` and recursively to every submodule. /// The function must accept a `Module&`. /// /// \rst /// .. code-block:: cpp /// MyModule module; /// module->apply([](nn::Module& module) { /// std::cout << module.name() << std::endl; /// }); /// \endrst void apply(const ModuleApplyFunction& function); /// Applies the `function` to the `Module` and recursively to every submodule. /// The function must accept a `const Module&`. /// /// \rst /// .. code-block:: cpp /// MyModule module; /// module->apply([](const nn::Module& module) { /// std::cout << module.name() << std::endl; /// }); /// \endrst void apply(const ConstModuleApplyFunction& function) const; /// Applies the `function` to the `Module` and recursively to every submodule. /// The function must accept a `const std::string&` for the key of the module, /// and a `Module&`. The key of the module itself is the empty string. If /// `name_prefix` is given, it is prepended to every key as /// `<name_prefix>.<key>` (and just `name_prefix` for the module itself). 
/// /// \rst /// .. code-block:: cpp /// MyModule module; /// module->apply([](const std::string& key, nn::Module& module) { /// std::cout << key << ": " << module.name() << std::endl; /// }); /// \endrst void apply( const NamedModuleApplyFunction& function, const std::string& name_prefix = std::string()); /// Applies the `function` to the `Module` and recursively to every submodule. /// The function must accept a `const std::string&` for the key of the module, /// and a `const Module&`. The key of the module itself is the empty string. /// If `name_prefix` is given, it is prepended to every key as /// `<name_prefix>.<key>` (and just `name_prefix` for the module itself). /// /// \rst /// .. code-block:: cpp /// MyModule module; /// module->apply([](const std::string& key, const nn::Module& module) { /// std::cout << key << ": " << module.name() << std::endl; /// }); /// \endrst void apply( const ConstNamedModuleApplyFunction& function, const std::string& name_prefix = std::string()) const; /// Applies the `function` to the `Module` and recursively to every submodule. /// The function must accept a `const std::shared_ptr<Module>&`. /// /// \rst /// .. code-block:: cpp /// MyModule module; /// module->apply([](const std::shared_ptr<nn::Module>& module) { /// std::cout << module->name() << std::endl; /// }); /// \endrst void apply(const ModulePointerApplyFunction& function) const; /// Applies the `function` to the `Module` and recursively to every submodule. /// The function must accept a `const std::string&` for the key of the module, /// and a `const std::shared_ptr<Module>&`. The key of the module itself is /// the empty string. If `name_prefix` is given, it is prepended to every key /// as /// `<name_prefix>.<key>` (and just `name_prefix` for the module itself). /// /// \rst /// .. code-block:: cpp /// MyModule module; /// module->apply([](const std::string& key, /// const std::shared_ptr<nn::Module>& module) { /// std::cout << key << ": " << module->name() << std::endl; /// }); /// \endrst void apply( const NamedModulePointerApplyFunction& function, const std::string& name_prefix = std::string()) const; /// Returns the parameters of this `Module` and if `recurse` is true, also /// recursively of every submodule. std::vector<Tensor> parameters(bool recurse = true) const; /// Returns an `OrderedDict` with the parameters of this `Module` along with /// their keys, and if `recurse` is true also recursively of every submodule. OrderedDict<std::string, Tensor> named_parameters(bool recurse = true) const; /// Returns the buffers of this `Module` and if `recurse` is true, also /// recursively of every submodule. std::vector<Tensor> buffers(bool recurse = true) const; /// Returns an `OrderedDict` with the buffers of this `Module` along with /// their keys, and if `recurse` is true also recursively of every submodule. OrderedDict<std::string, Tensor> named_buffers(bool recurse = true) const; /// Returns the submodules of this `Module` (the entire submodule hierarchy) /// and if `include_self` is true, also inserts a `shared_ptr` to this module /// in the first position. /// /// \rst /// .. warning:: /// Only pass `include_self` as `true` if this `Module` is stored in a /// `shared_ptr`! Otherwise an exception will be thrown. You may still call /// this method with `include_self` set to false if your `Module` is not /// stored in a `shared_ptr`. 
/// \endrst std::vector<std::shared_ptr<Module>> modules(bool include_self = true) const; /// Returns an `OrderedDict` of the submodules of this `Module` (the entire /// submodule hierarchy) and their keys, and if `include_self` is true, also /// inserts a `shared_ptr` to this module in the first position. If /// `name_prefix` is given, it is prepended to every key as /// `<name_prefix>.<key>` (and just `name_prefix` for the module itself). /// /// \rst /// .. warning:: /// Only pass `include_self` as `true` if this `Module` is stored in a /// `shared_ptr`! Otherwise an exception will be thrown. You may still call /// this method with `include_self` set to false if your `Module` is not /// stored in a `shared_ptr`. /// \endrst OrderedDict<std::string, std::shared_ptr<Module>> named_modules( const std::string& name_prefix = std::string(), bool include_self = true) const; /// Returns the direct submodules of this `Module`. std::vector<std::shared_ptr<Module>> children() const; /// Returns an `OrderedDict` of the direct submodules of this `Module` and /// their keys. OrderedDict<std::string, std::shared_ptr<Module>> named_children() const; /// Enables "training" mode. virtual void train(bool on = true); /// Calls train(false) to enable "eval" mode. /// Do not override this method, override `train()` instead. void eval(); /// True if the module is in training mode. /// /// Every `Module` has a boolean associated with it that determines whether /// the `Module` is currently in *training* mode (set via `.train()`) or in /// *evaluation* (inference) mode (set via `.eval()`). This property is /// exposed via `is_training()`, and may be used by the implementation of a /// concrete module to modify its runtime behavior. See the `BatchNorm` or /// `Dropout` modules for examples of `Module`s that use different code paths /// depending on this property. virtual bool is_training() const noexcept; /// Recursively casts all parameters to the given `dtype` and `device`. /// /// If `non_blocking` is true and the source is in pinned memory and /// destination is on the GPU or vice versa, the copy is performed /// asynchronously with respect to the host. Otherwise, the argument has no /// effect. virtual void to( torch::Device device, torch::Dtype dtype, bool non_blocking = false); /// Recursively casts all parameters to the given dtype. /// /// If `non_blocking` is true and the source is in pinned memory and /// destination is on the GPU or vice versa, the copy is performed /// asynchronously with respect to the host. Otherwise, the argument has no /// effect. virtual void to(torch::Dtype dtype, bool non_blocking = false); /// Recursively moves all parameters to the given device. /// /// If `non_blocking` is true and the source is in pinned memory and /// destination is on the GPU or vice versa, the copy is performed /// asynchronously with respect to the host. Otherwise, the argument has no /// effect. virtual void to(torch::Device device, bool non_blocking = false); /// Recursively zeros out the `grad` value of each registered parameter. virtual void zero_grad(bool set_to_none = true); /// Attempts to cast this `Module` to the given `ModuleType`. /// /// This method is useful when calling `apply()`. /// \rst /// .. 
code-block:: cpp /// /// void initialize_weights(nn::Module& module) { /// torch::NoGradGuard no_grad; /// if (auto* linear = module.as<nn::Linear>()) { /// linear->weight.normal_(0.0, 0.02); /// } /// } /// /// MyModule module; /// module->apply(initialize_weights); /// \endrst template <typename ModuleType> typename ModuleType::ContainedType* as() noexcept; /// Attempts to cast this `Module` to the given `ModuleType`. /// /// This method is useful when calling `apply()`. /// \rst /// .. code-block:: cpp /// void initialize_weights(nn::Module& module) { /// torch::NoGradGuard no_grad; /// if (auto* linear = module.as<nn::Linear>()) { /// linear->weight.normal_(0.0, 0.02); /// } /// } /// /// MyModule module; /// module->apply(initialize_weights); /// \endrst template <typename ModuleType> const typename ModuleType::ContainedType* as() const noexcept; /// Attempts to cast this `Module` to the given `ModuleType`. /// /// This method is useful when calling `apply()`. /// \rst /// .. code-block:: cpp /// /// void initialize_weights(nn::Module& module) { /// torch::NoGradGuard no_grad; /// if (auto* linear = module.as<nn::Linear>()) { /// linear->weight.normal_(0.0, 0.02); /// } /// } /// /// MyModule module; /// module.apply(initialize_weights); /// \endrst template < typename ModuleType, typename = torch::detail::disable_if_module_holder_t<ModuleType>> ModuleType* as() noexcept; /// Attempts to cast this `Module` to the given `ModuleType`. /// /// This method is useful when calling `apply()`. /// \rst /// .. code-block:: cpp /// /// void initialize_weights(nn::Module& module) { /// torch::NoGradGuard no_grad; /// if (auto* linear = module.as<nn::Linear>()) { /// linear->weight.normal_(0.0, 0.02); /// } /// } /// /// MyModule module; /// module.apply(initialize_weights); /// \endrst template < typename ModuleType, typename = torch::detail::disable_if_module_holder_t<ModuleType>> const ModuleType* as() const noexcept; /// Serializes the `Module` into the given `OutputArchive`. /// /// If the `Module` contains unserializable submodules (e.g. /// `nn::Functional`), those submodules are skipped when serializing. virtual void save(serialize::OutputArchive& archive) const; /// Deserializes the `Module` from the given `InputArchive`. /// /// If the `Module` contains unserializable submodules (e.g. /// `nn::Functional`), we don't check the existence of those submodules in the /// `InputArchive` when deserializing. virtual void load(serialize::InputArchive& archive); /// Streams a pretty representation of the `Module` into the given `stream`. /// By default, this representation will be the name of the module (taken from /// `name()`), followed by a recursive pretty print of all of the `Module`'s /// submodules. /// /// Override this method to change the pretty print. The input /// `stream` should be returned from the method, to allow easy chaining. virtual void pretty_print(std::ostream& stream) const; /// Returns whether the `Module` is serializable. virtual bool is_serializable() const; /// Registers a parameter with this `Module`. /// /// A parameter should be any gradient-recording tensor used in the /// implementation of your `Module`. Registering it makes it available to /// methods such as `parameters()`, `clone()` or `to().` /// /// Note that registering an undefined Tensor (e.g. /// `module.register_parameter("param", Tensor())`) is allowed, and is /// equivalent to `module.register_parameter("param", None)` in Python API. /// /// \rst /// .. 
code-block:: cpp
///
///   MyModule::MyModule() {
///     weight_ = register_parameter("weight", torch::randn({A, B}));
///   }
/// \endrst
Tensor& register_parameter(
    std::string name,
    Tensor tensor,
    bool requires_grad = true);

/// Registers a buffer with this `Module`.
///
/// A buffer is intended to be state in your module that does not record
/// gradients, such as running statistics. Registering it makes it available
/// to methods such as `buffers()`, `clone()` or `to()`.
///
/// \rst
/// .. code-block:: cpp
///
///   MyModule::MyModule() {
///     mean_ = register_buffer("mean", torch::empty({num_features_}));
///   }
/// \endrst
Tensor& register_buffer(std::string name, Tensor tensor);

/// Registers a submodule with this `Module`.
///
/// Registering a module makes it available to methods such as `modules()`,
/// `clone()` or `to()`.
///
/// \rst
/// .. code-block:: cpp
///
///   MyModule::MyModule() {
///     submodule_ = register_module("linear", torch::nn::Linear(3, 4));
///   }
/// \endrst
template <typename ModuleType>
std::shared_ptr<ModuleType> register_module(
    std::string name,
    std::shared_ptr<ModuleType> module);

/// Registers a submodule with this `Module`.
///
/// This method deals with `ModuleHolder`s.
///
/// Registering a module makes it available to methods such as `modules()`,
/// `clone()` or `to()`.
///
/// \rst
/// .. code-block:: cpp
///
///   MyModule::MyModule() {
///     submodule_ = register_module("linear", torch::nn::Linear(3, 4));
///   }
/// \endrst
template <typename ModuleType>
std::shared_ptr<ModuleType> register_module(
    std::string name,
    ModuleHolder<ModuleType> module_holder);

/// Replaces a registered submodule with this `Module`.
///
/// This takes care of the registration; if you use submodule members, you
/// should assign the submodule as well, i.e. use
///   module->submodule_ = module->replace_module("linear",
///   torch::nn::Linear(3, 4));
/// It only works when a module of the name is already registered.
///
/// This is useful for replacing a module after initialization, e.g.
/// for finetuning.
template <typename ModuleType>
std::shared_ptr<ModuleType> replace_module(
    const std::string& name,
    std::shared_ptr<ModuleType> module);

/// Replaces a registered submodule with this `Module`.
/// This method deals with `ModuleHolder`s.
///
/// This takes care of the registration; if you use submodule members, you
/// should assign the submodule as well, i.e. use
///   module->submodule_ = module->replace_module("linear", linear_holder);
/// It only works when a module of the name is already registered.
///
/// This is useful for replacing a module after initialization, e.g.
/// for finetuning.
template <typename ModuleType>
std::shared_ptr<ModuleType> replace_module(
    const std::string& name,
    ModuleHolder<ModuleType> module_holder);

/// Unregisters a submodule from this `Module`. If there is no such module
/// with `name` an exception is thrown.
void unregister_module(const std::string& name);

 protected:
  /// The following three functions allow a module with default arguments in
  /// its forward method to be used in a Sequential module.
  /// You should NEVER override these functions manually. Instead, you should
  /// use the `FORWARD_HAS_DEFAULT_ARGS` macro.
  virtual bool _forward_has_default_args() {
    return false;
  }

  virtual unsigned int _forward_num_required_args() {
    TORCH_CHECK(
        false,
        "torch::nn::Module subclass that has default arguments in `forward` method ",
        "must override `_forward_num_required_args` method. Please use ",
Please use ", "`FORWARD_HAS_DEFAULT_ARGS` macro to do so."); } virtual std::vector<AnyValue> _forward_populate_default_args( std::vector<AnyValue>&& arguments) { TORCH_CHECK( false, "torch::nn::Module subclass that has default arguments in `forward` method ", "must override `_forward_populate_default_args` method. Please use ", "`FORWARD_HAS_DEFAULT_ARGS` macro to do so."); } /// The registered parameters of this `Module`. /// Inorder to access parameters_ in ParameterDict and ParameterList // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) OrderedDict<std::string, Tensor> parameters_; private: // Friend classes. template <typename Derived> friend class Cloneable; template <typename ModuleType, typename... ArgumentTypes> friend struct AnyModuleHolder; /// Pretty prints the given `Module` into the `ostream`. TORCH_API friend std::ostream& operator<<( std::ostream& stream, const nn::Module& module); // data parallel using this method to configure gradient edges during the // replicate step. template <typename ModuleType> friend void replicate_grad_edges( const std::shared_ptr<Module>& module, const std::vector<std::shared_ptr<ModuleType>>& replicas, const std::vector<Device>& devices); // Private methods. /// Used in the implementation of `Cloneable`. virtual void clone_(Module& other, const std::optional<Device>& device); /// The implementation of the various `to()` methods. template <typename... Ts> void to_impl(Ts&&... ts); /// Implements pretty printing the module hierarchy. void pretty_print_recursive( std::ostream& stream, const std::string& indentation) const; /// Applies the `function` to every submodule recursively, starting at this /// `Module`'s children (thus not including the module itself). void apply_to_submodules( const NamedModulePointerApplyFunction& function, const std::string& name_prefix = std::string()) const; /// Returns a shared_ptr to `this` in a safe (checked) way. std::shared_ptr<Module> shared_from_this_checked() const; /// The registered buffers of this `Module`. OrderedDict<std::string, Tensor> buffers_; /// The registered (direct) submodules of this `Module`. OrderedDict<std::string, std::shared_ptr<Module>> children_; /// The module's name (e.g. "LSTM"). mutable std::optional<std::string> name_; /// Whether the module is in training mode. bool is_training_{true}; }; /// Serialize a `Module` pointer into an `OutputArchive`. TORCH_API serialize::OutputArchive& operator<<( serialize::OutputArchive& archive, const std::shared_ptr<nn::Module>& module); /// Deserializes a `Module` from an `InputArchive`. TORCH_API serialize::InputArchive& operator>>( serialize::InputArchive& archive, const std::shared_ptr<nn::Module>& module); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nn::Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename ModuleType> typename ModuleType::ContainedType* Module::as() noexcept { // Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for // `Linear`, since `LinearImpl` inherits `nn::Module`. return as<typename ModuleType::ContainedType>(); } template <typename ModuleType> const typename ModuleType::ContainedType* Module::as() const noexcept { // Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for // `Linear`, since `LinearImpl` inherits `nn::Module`. 
return as<typename ModuleType::ContainedType>(); } template <typename ModuleType, typename> ModuleType* Module::as() noexcept { return dynamic_cast<ModuleType*>(this); } template <typename ModuleType, typename> const ModuleType* Module::as() const noexcept { return dynamic_cast<const ModuleType*>(this); } template <typename ModuleType> std::shared_ptr<ModuleType> Module::register_module( std::string name, std::shared_ptr<ModuleType> module) { TORCH_CHECK(!name.empty(), "Submodule name must not be empty"); TORCH_CHECK( name.find('.') == std::string::npos, "Submodule name must not contain a dot (got '", name, "')"); auto& base_module = children_.insert(std::move(name), std::move(module)); return std::dynamic_pointer_cast<ModuleType>(base_module); } template <typename ModuleType> std::shared_ptr<ModuleType> Module::register_module( std::string name, ModuleHolder<ModuleType> module_holder) { return register_module(std::move(name), module_holder.ptr()); } template <typename ModuleType> std::shared_ptr<ModuleType> Module::replace_module( const std::string& name, std::shared_ptr<ModuleType> module) { auto& base_module = (children_[name] = std::move(module)); return std::dynamic_pointer_cast<ModuleType>(base_module); } template <typename ModuleType> std::shared_ptr<ModuleType> Module::replace_module( const std::string& name, ModuleHolder<ModuleType> module_holder) { return replace_module(name, module_holder.ptr()); } template <typename... Ts> void Module::to_impl(Ts&&... ts) { // First call `to()` on every child module. for (auto& child : children_) { child.value()->to(ts...); } // Then move every parameter to the new dtype/device. for (auto& parameter : named_parameters(/*recurse=*/false)) { parameter->set_data(parameter->to(ts...)); } // Then move every buffer to the new dtype/device. for (auto& buffer : named_buffers(/*recurse=*/false)) { buffer->set_data(buffer->to(ts...)); } } } // namespace torch::nn ```
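A sketch of a concrete module built on the API above: parameters, buffers and submodules are registered in the constructor so that `parameters()`, `to()` and `clone()` can see them. The name `MyNetImpl` and the layer sizes are invented for illustration:

```cpp
#include <torch/torch.h>
#include <iostream>

// Hypothetical module: registration in the constructor wires everything into
// the Module machinery described above.
struct MyNetImpl : torch::nn::Module {
  MyNetImpl() {
    fc1 = register_module("fc1", torch::nn::Linear(32, 16));
    fc2 = register_module("fc2", torch::nn::Linear(16, 4));
    scale = register_parameter("scale", torch::ones({1}));
    steps = register_buffer("steps", torch::zeros({1}));
  }

  torch::Tensor forward(torch::Tensor x) {
    steps += 1;  // buffer: state that records no gradient
    return fc2(torch::relu(fc1(x))) * scale;
  }

  torch::nn::Linear fc1{nullptr}, fc2{nullptr};
  torch::Tensor scale, steps;
};
TORCH_MODULE(MyNet);

int main() {
  MyNet net;
  // Iterate registered parameters; keys are dotted paths like "fc1.weight".
  for (const auto& p : net->named_parameters()) {
    std::cout << p.key() << ' ' << p.value().sizes() << '\n';
  }
  net->to(torch::kFloat64);  // recursively casts parameters and buffers
}
```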
=============================================================================================================================================== SOURCE CODE FILE: modules.h LINES: 1 SIZE: 1.29 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules.h ENCODING: utf-8 ```h #pragma once // Common #include <torch/nn/modules/common.h> // Containers #include <torch/nn/modules/container/any.h> #include <torch/nn/modules/container/functional.h> #include <torch/nn/modules/container/moduledict.h> #include <torch/nn/modules/container/modulelist.h> #include <torch/nn/modules/container/named_any.h> #include <torch/nn/modules/container/parameterdict.h> #include <torch/nn/modules/container/parameterlist.h> #include <torch/nn/modules/container/sequential.h> // Layers #include <torch/nn/modules/activation.h> #include <torch/nn/modules/adaptive.h> #include <torch/nn/modules/batchnorm.h> #include <torch/nn/modules/conv.h> #include <torch/nn/modules/distance.h> #include <torch/nn/modules/dropout.h> #include <torch/nn/modules/embedding.h> #include <torch/nn/modules/fold.h> #include <torch/nn/modules/instancenorm.h> #include <torch/nn/modules/linear.h> #include <torch/nn/modules/loss.h> #include <torch/nn/modules/normalization.h> #include <torch/nn/modules/padding.h> #include <torch/nn/modules/pixelshuffle.h> #include <torch/nn/modules/pooling.h> #include <torch/nn/modules/rnn.h> #include <torch/nn/modules/transformer.h> #include <torch/nn/modules/transformercoder.h> #include <torch/nn/modules/transformerlayer.h> #include <torch/nn/modules/upsampling.h> ```
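This umbrella header only aggregates the container and layer headers listed above; including it (directly, or via `torch/torch.h`) is enough to compose the built-in layers, e.g.:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Every layer type below comes from a header aggregated by torch/nn/modules.h.
  torch::nn::Sequential model(
      torch::nn::Linear(784, 256),
      torch::nn::ReLU(),
      torch::nn::Dropout(0.5),
      torch::nn::Linear(256, 10));
  auto logits = model->forward(torch::rand({8, 784}));
  std::cout << logits.sizes() << '\n';  // [8, 10]
}
```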
========================================================================================================================================================== SOURCE CODE FILE: _functions.h LINES: 1 SIZE: 0.66 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\_functions.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/autograd/custom_function.h> #include <torch/csrc/autograd/variable.h> #include <torch/nn/options/normalization.h> #include <torch/types.h> namespace torch::nn::functions { class CrossMapLRN2d : public torch::autograd::Function<CrossMapLRN2d> { public: static torch::autograd::Variable forward( torch::autograd::AutogradContext* ctx, const torch::autograd::Variable& input, const CrossMapLRN2dOptions& options); static torch::autograd::variable_list backward( torch::autograd::AutogradContext* ctx, torch::autograd::variable_list grad_output); }; } // namespace torch::nn::functions ```
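`CrossMapLRN2d` above follows the standard `torch::autograd::Function` recipe: a static `forward` that stashes whatever `backward` will need in the `AutogradContext`, and a static `backward` that returns one gradient per `forward` input. A self-contained sketch of that recipe, with an invented `MulConstant` function:

```cpp
#include <torch/torch.h>

// Hypothetical Function<T> subclass illustrating the pattern used by
// CrossMapLRN2d above; saved_data carries non-tensor state to backward().
struct MulConstant : torch::autograd::Function<MulConstant> {
  static torch::Tensor forward(
      torch::autograd::AutogradContext* ctx,
      const torch::Tensor& input,
      double constant) {
    ctx->saved_data["constant"] = constant;
    return input * constant;
  }

  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext* ctx,
      torch::autograd::variable_list grad_output) {
    auto constant = ctx->saved_data["constant"].toDouble();
    // One entry per forward() input; the non-tensor input gets an
    // undefined Tensor in place of a gradient.
    return {grad_output[0] * constant, torch::Tensor()};
  }
};

int main() {
  auto x = torch::randn({3}, torch::requires_grad());
  auto y = MulConstant::apply(x, 2.0);
  y.sum().backward();
  // x.grad() now holds 2.0 in every element.
}
```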
========================================================================================================================================================== SOURCE CODE FILE: activation.h LINES: 1 SIZE: 30.46 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\activation.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/cloneable.h> #include <torch/nn/functional/activation.h> #include <torch/nn/modules/common.h> #include <torch/nn/modules/linear.h> #include <torch/nn/options/activation.h> #include <torch/csrc/Export.h> namespace torch::nn { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies elu over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.ELU to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::ELUOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// ELU model(ELUOptions().alpha(42.42).inplace(true)); /// ``` class TORCH_API ELUImpl : public torch::nn::Cloneable<ELUImpl> { public: explicit ELUImpl(const ELUOptions& options_ = {}); Tensor forward(Tensor input); void reset() override; /// Pretty prints the `ELU` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. ELUOptions options; }; /// A `ModuleHolder` subclass for `ELUImpl`. /// See the documentation for `ELUImpl` class to learn what methods it /// provides, and examples of how to use `ELU` with `torch::nn::ELUOptions`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(ELU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the selu function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.SELU to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::SELUOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// SELU model(SELUOptions().inplace(true)); /// ``` class TORCH_API SELUImpl : public torch::nn::Cloneable<SELUImpl> { public: explicit SELUImpl(const SELUOptions& options_ = {}); Tensor forward(Tensor input); void reset() override; /// Pretty prints the `SELU` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. SELUOptions options; }; /// A `ModuleHolder` subclass for `SELUImpl`. /// See the documentation for `SELUImpl` class to learn what methods it /// provides, and examples of how to use `SELU` with `torch::nn::SELUOptions`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(SELU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the hard shrinkage function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Hardshrink to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::HardshrinkOptions` class to learn what /// constructor arguments are supported for this module. 
/// /// Example: /// ``` /// Hardshrink model(HardshrinkOptions().lambda(42.42)); /// ``` class TORCH_API HardshrinkImpl : public torch::nn::Cloneable<HardshrinkImpl> { public: explicit HardshrinkImpl(const HardshrinkOptions& options_ = {}); Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Hardshrink` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. HardshrinkOptions options; }; /// A `ModuleHolder` subclass for `HardshrinkImpl`. /// See the documentation for `HardshrinkImpl` class to learn what methods it /// provides, and examples of how to use `Hardshrink` with /// `torch::nn::HardshrinkOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Hardshrink); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardtanh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the HardTanh function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Hardtanh to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::HardtanhOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Hardtanh /// model(HardtanhOptions().min_val(-42.42).max_val(0.42).inplace(true)); /// ``` class TORCH_API HardtanhImpl : public torch::nn::Cloneable<HardtanhImpl> { public: explicit HardtanhImpl(const HardtanhOptions& options_ = {}); Tensor forward(Tensor input); void reset() override; /// Pretty prints the `Hardtanh` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. HardtanhOptions options; }; /// A `ModuleHolder` subclass for `HardtanhImpl`. /// See the documentation for `HardtanhImpl` class to learn what methods it /// provides, and examples of how to use `Hardtanh` with /// `torch::nn::HardtanhOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Hardtanh); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LeakyReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the LeakyReLU function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.LeakyReLU to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::LeakyReLUOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// LeakyReLU model(LeakyReLUOptions().negative_slope(0.42).inplace(true)); /// ``` class TORCH_API LeakyReLUImpl : public torch::nn::Cloneable<LeakyReLUImpl> { public: explicit LeakyReLUImpl(const LeakyReLUOptions& options_ = {}); Tensor forward(Tensor input); void reset() override; /// Pretty prints the `LeakyReLU` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. LeakyReLUOptions options; }; /// A `ModuleHolder` subclass for `LeakyReLUImpl`. /// See the documentation for `LeakyReLUImpl` class to learn what methods it /// provides, and examples of how to use `LeakyReLU` with /// `torch::nn::LeakyReLUOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(LeakyReLU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LogSigmoid ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the LogSigmoid function element-wise. 
/// See https://pytorch.org/docs/main/nn.html#torch.nn.LogSigmoid to learn /// about the exact behavior of this module. class TORCH_API LogSigmoidImpl : public torch::nn::Cloneable<LogSigmoidImpl> { public: Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `LogSigmoid` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `LogSigmoidImpl`. /// See the documentation for `LogSigmoidImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(LogSigmoid); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the Softmax function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Softmax to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::SoftmaxOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Softmax model(SoftmaxOptions(1)); /// ``` class TORCH_API SoftmaxImpl : public torch::nn::Cloneable<SoftmaxImpl> { public: explicit SoftmaxImpl(int64_t dim) : SoftmaxImpl(SoftmaxOptions(dim)) {} explicit SoftmaxImpl(const SoftmaxOptions& options_); Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Softmax` module into the given `stream`. void pretty_print(std::ostream& stream) const override; SoftmaxOptions options; }; /// A `ModuleHolder` subclass for `SoftmaxImpl`. /// See the documentation for `SoftmaxImpl` class to learn what methods it /// provides, and examples of how to use `Softmax` with /// `torch::nn::SoftmaxOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Softmax); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmin ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the Softmin function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Softmin to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::SoftminOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Softmin model(SoftminOptions(1)); /// ``` class TORCH_API SoftminImpl : public torch::nn::Cloneable<SoftminImpl> { public: explicit SoftminImpl(int64_t dim) : SoftminImpl(SoftminOptions(dim)) {} explicit SoftminImpl(const SoftminOptions& options_); Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Softmin` module into the given `stream`. void pretty_print(std::ostream& stream) const override; SoftminOptions options; }; /// A `ModuleHolder` subclass for `SoftminImpl`. /// See the documentation for `SoftminImpl` class to learn what methods it /// provides, and examples of how to use `Softmin` with /// `torch::nn::SoftminOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Softmin); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LogSoftmax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the LogSoftmax function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.LogSoftmax to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::LogSoftmaxOptions` class to learn what /// constructor arguments are supported for this module. 
/// /// Example: /// ``` /// LogSoftmax model(LogSoftmaxOptions(1)); /// ``` class TORCH_API LogSoftmaxImpl : public torch::nn::Cloneable<LogSoftmaxImpl> { public: explicit LogSoftmaxImpl(int64_t dim) : LogSoftmaxImpl(LogSoftmaxOptions(dim)) {} explicit LogSoftmaxImpl(const LogSoftmaxOptions& options_); Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `LogSoftmax` module into the given `stream`. void pretty_print(std::ostream& stream) const override; LogSoftmaxOptions options; }; /// A `ModuleHolder` subclass for `LogSoftmaxImpl`. /// See the documentation for `LogSoftmaxImpl` class to learn what methods it /// provides, and examples of how to use `LogSoftmax` with /// `torch::nn::LogSoftmaxOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(LogSoftmax); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmax2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the Softmax2d function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Softmax2d to learn /// about the exact behavior of this module. class TORCH_API Softmax2dImpl : public torch::nn::Cloneable<Softmax2dImpl> { public: Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Softmax2d` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `Softmax2dImpl`. /// See the documentation for `Softmax2dImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(Softmax2d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the PReLU function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.PReLU to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::PReLUOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// PReLU model(PReLUOptions().num_parameters(42)); /// ``` class TORCH_API PReLUImpl : public torch::nn::Cloneable<PReLUImpl> { public: explicit PReLUImpl(const PReLUOptions& options_ = {}); Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `PReLU` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. PReLUOptions options; /// The learned weight. Tensor weight; }; /// A `ModuleHolder` subclass for `PReLUImpl`. /// See the documentation for `PReLUImpl` class to learn what methods it /// provides, and examples of how to use `PReLU` with `torch::nn::PReLUOptions`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(PReLU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the ReLU function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.ReLU to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::ReLUOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// ReLU model(ReLUOptions().inplace(true)); /// ``` class TORCH_API ReLUImpl : public torch::nn::Cloneable<ReLUImpl> { public: explicit ReLUImpl(const ReLUOptions& options_ = {}); Tensor forward(Tensor input); void reset() override; /// Pretty prints the `ReLU` module into the given `stream`. 
void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. ReLUOptions options; }; /// A `ModuleHolder` subclass for `ReLUImpl`. /// See the documentation for `ReLUImpl` class to learn what methods it /// provides, and examples of how to use `ReLU` with `torch::nn::ReLUOptions`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(ReLU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReLU6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the ReLU6 function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.ReLU6 to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::ReLU6Options` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// ReLU6 model(ReLU6Options().inplace(true)); /// ``` class TORCH_API ReLU6Impl : public torch::nn::Cloneable<ReLU6Impl> { public: explicit ReLU6Impl(const ReLU6Options& options_ = {}); Tensor forward(Tensor input); void reset() override; /// Pretty prints the `ReLU6` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. ReLU6Options options; }; /// A `ModuleHolder` subclass for `ReLU6Impl`. /// See the documentation for `ReLU6Impl` class to learn what methods it /// provides, and examples of how to use `ReLU6` with `torch::nn::ReLU6Options`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(ReLU6); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the RReLU function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.RReLU to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::RReLUOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// RReLU model(RReLUOptions().lower(0.24).upper(0.42).inplace(true)); /// ``` class TORCH_API RReLUImpl : public torch::nn::Cloneable<RReLUImpl> { public: explicit RReLUImpl(const RReLUOptions& options_ = {}); Tensor forward(Tensor input); void reset() override; /// Pretty prints the `RReLU` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. RReLUOptions options; }; /// A `ModuleHolder` subclass for `RReLUImpl`. /// See the documentation for `RReLUImpl` class to learn what methods it /// provides, and examples of how to use `RReLU` with `torch::nn::RReLUOptions`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(RReLU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies celu over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.CELU to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::CELUOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// CELU model(CELUOptions().alpha(42.42).inplace(true)); /// ``` class TORCH_API CELUImpl : public torch::nn::Cloneable<CELUImpl> { public: explicit CELUImpl(const CELUOptions& options_ = {}); Tensor forward(Tensor input); void reset() override; /// Pretty prints the `CELU` module into the given `stream`. 
void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. CELUOptions options; }; /// A `ModuleHolder` subclass for `CELUImpl`. /// See the documentation for `CELUImpl` class to learn what methods it /// provides, and examples of how to use `CELU` with `torch::nn::CELUOptions`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(CELU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies glu over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.GLU to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::GLUOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// GLU model(GLUOptions(1)); /// ``` class TORCH_API GLUImpl : public torch::nn::Cloneable<GLUImpl> { public: explicit GLUImpl(const GLUOptions& options_ = {}); Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `GLU` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. GLUOptions options; }; /// A `ModuleHolder` subclass for `GLUImpl`. /// See the documentation for `GLUImpl` class to learn what methods it /// provides, and examples of how to use `GLU` with `torch::nn::GLUOptions`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(GLU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies gelu over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.GELU to learn /// about the exact behavior of this module. class TORCH_API GELUImpl : public torch::nn::Cloneable<GELUImpl> { public: explicit GELUImpl(GELUOptions options_ = {}); Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `GELU` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. GELUOptions options; }; /// A `ModuleHolder` subclass for `GELUImpl`. /// See the documentation for `GELUImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(GELU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SiLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies silu over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.SiLU to learn /// about the exact behavior of this module. class TORCH_API SiLUImpl : public torch::nn::Cloneable<SiLUImpl> { public: Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `SiLU` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `SiLUImpl`. /// See the documentation for `SiLUImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(SiLU); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Mish ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies mish over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Mish to learn /// about the exact behavior of this module. 
class TORCH_API MishImpl : public torch::nn::Cloneable<MishImpl> { public: Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Mish` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `MishImpl`. /// See the documentation for `MishImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(Mish); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sigmoid ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies sigmoid over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Sigmoid to learn /// about the exact behavior of this module. class TORCH_API SigmoidImpl : public torch::nn::Cloneable<SigmoidImpl> { public: Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Sigmoid` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `SigmoidImpl`. /// See the documentation for `SigmoidImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(Sigmoid); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softplus ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies softplus over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Softplus to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::SoftplusOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42)); /// ``` class TORCH_API SoftplusImpl : public torch::nn::Cloneable<SoftplusImpl> { public: explicit SoftplusImpl(const SoftplusOptions& options_ = {}); Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Softplus` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. SoftplusOptions options; }; /// A `ModuleHolder` subclass for `SoftplusImpl`. /// See the documentation for `SoftplusImpl` class to learn what methods it /// provides, and examples of how to use `Softplus` with /// `torch::nn::SoftplusOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Softplus); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the soft shrinkage function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Softshrink to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::SoftshrinkOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Softshrink model(SoftshrinkOptions(42.42)); /// ``` class TORCH_API SoftshrinkImpl : public torch::nn::Cloneable<SoftshrinkImpl> { public: explicit SoftshrinkImpl(const SoftshrinkOptions& options_ = {}); Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Softshrink` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. SoftshrinkOptions options; }; /// A `ModuleHolder` subclass for `SoftshrinkImpl`. 
/// See the documentation for `SoftshrinkImpl` class to learn what methods it /// provides, and examples of how to use `Softshrink` with /// `torch::nn::SoftshrinkOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Softshrink); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softsign ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies Softsign over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Softsign to learn /// about the exact behavior of this module. class TORCH_API SoftsignImpl : public torch::nn::Cloneable<SoftsignImpl> { public: Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Softsign` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `SoftsignImpl`. /// See the documentation for `SoftsignImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(Softsign); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tanh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies Tanh over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Tanh to learn /// about the exact behavior of this module. class TORCH_API TanhImpl : public torch::nn::Cloneable<TanhImpl> { public: Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Tanh` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `TanhImpl`. /// See the documentation for `TanhImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(Tanh); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tanhshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies Tanhshrink over a given input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Tanhshrink to learn /// about the exact behavior of this module. class TORCH_API TanhshrinkImpl : public torch::nn::Cloneable<TanhshrinkImpl> { public: Tensor forward(const Tensor& input); void reset() override; /// Pretty prints the `Tanhshrink` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `TanhshrinkImpl`. /// See the documentation for `TanhshrinkImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(Tanhshrink); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Threshold ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the Threshold function element-wise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Threshold to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::ThresholdOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Threshold model(ThresholdOptions(42.42, 24.24).inplace(true)); /// ``` class TORCH_API ThresholdImpl : public torch::nn::Cloneable<ThresholdImpl> { public: ThresholdImpl(double threshold, double value) : ThresholdImpl(ThresholdOptions(threshold, value)) {} explicit ThresholdImpl(const ThresholdOptions& options_); Tensor forward(Tensor input); void reset() override; /// Pretty prints the `Threshold` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The options with which this `Module` was constructed. 
  ThresholdOptions options;
};

/// A `ModuleHolder` subclass for `ThresholdImpl`.
/// See the documentation for `ThresholdImpl` class to learn what methods it
/// provides, and examples of how to use `Threshold` with
/// `torch::nn::ThresholdOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(Threshold);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiheadAttention ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies the MultiheadAttention function.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.MultiheadAttention
/// to learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::MultiheadAttentionOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// MultiheadAttention model(MultiheadAttentionOptions(20, 10).bias(false));
/// ```
class TORCH_API MultiheadAttentionImpl
    : public torch::nn::Cloneable<MultiheadAttentionImpl> {
 public:
  MultiheadAttentionImpl(int64_t embed_dim, int64_t num_heads)
      : MultiheadAttentionImpl(
            MultiheadAttentionOptions(embed_dim, num_heads)) {}
  explicit MultiheadAttentionImpl(const MultiheadAttentionOptions& options_);

  std::tuple<Tensor, Tensor> forward(
      const Tensor& query,
      const Tensor& key,
      const Tensor& value,
      const Tensor& key_padding_mask = {},
      bool need_weights = true,
      const Tensor& attn_mask = {},
      bool average_attn_weights = true);

 protected:
  FORWARD_HAS_DEFAULT_ARGS(
      {3, AnyValue(Tensor())},
      {4, AnyValue(true)},
      {5, AnyValue(Tensor())},
      {6, AnyValue(true)})

 public:
  void reset() override;

  void _reset_parameters();

  /// The options with which this `Module` was constructed.
  MultiheadAttentionOptions options;

  bool _qkv_same_embed_dim{};
  Tensor in_proj_weight;
  Tensor in_proj_bias;
  Tensor bias_k;
  Tensor bias_v;
  Linear out_proj = nullptr;
  Tensor q_proj_weight;
  Tensor k_proj_weight;
  Tensor v_proj_weight;
  int64_t head_dim{};
};

/// A `ModuleHolder` subclass for `MultiheadAttentionImpl`.
/// See the documentation for `MultiheadAttentionImpl` class to learn what
/// methods it provides, and examples of how to use `MultiheadAttention` with
/// `torch::nn::MultiheadAttentionOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(MultiheadAttention);

} // namespace torch::nn
```
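A minimal usage sketch for the modules declared in this header (not part of the original file; it assumes a linked libtorch build, the `torch/torch.h` umbrella header, and illustrative shapes):

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  // Options-based construction, as in the `Example:` comments above.
  torch::nn::LogSoftmax log_softmax(torch::nn::LogSoftmaxOptions(/*dim=*/1));
  torch::nn::PReLU prelu(torch::nn::PReLUOptions().num_parameters(3));
  torch::nn::Threshold threshold(/*threshold=*/0.5, /*value=*/0.0);

  auto x = torch::randn({2, 3});
  std::cout << log_softmax(x) << '\n';  // exp() of each row sums to 1
  std::cout << prelu(x) << '\n';        // one learned slope per channel
  std::cout << threshold(x) << '\n';    // entries <= 0.5 replaced by 0.0

  // MultiheadAttention returns (output, attention weights); inputs are
  // laid out as (seq_len, batch, embed_dim).
  torch::nn::MultiheadAttention mha(
      torch::nn::MultiheadAttentionOptions(/*embed_dim=*/8, /*num_heads=*/2));
  auto q = torch::randn({5, 2, 8});
  auto [out, weights] = mha(q, q, q);
  std::cout << out.sizes() << '\n';  // [5, 2, 8]
}
```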
========================================================================================================================================================
SOURCE CODE FILE: adaptive.h LINES: 1 SIZE: 3.54 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\adaptive.h ENCODING: utf-8
```h
#pragma once

#include <torch/nn/cloneable.h>
#include <torch/nn/functional/activation.h>
#include <torch/nn/module.h>
#include <torch/nn/modules/container/modulelist.h>
#include <torch/nn/modules/container/sequential.h>
#include <torch/nn/options/adaptive.h>

#include <utility>

namespace torch::nn {

/// The output of a single invocation of an AdaptiveLogSoftmaxWithLoss
/// module's `forward()` method.
struct TORCH_API ASMoutput {
  ASMoutput(Tensor output_, double loss_);

  /// Tensor containing computed target log probabilities for each example
  Tensor output;

  /// Scalar representing the computed negative log likelihood loss
  double loss;
};

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveLogSoftmaxWithLoss
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Efficient softmax approximation as described in
/// `Efficient softmax approximation for GPUs`_ by Edouard Grave, Armand Joulin,
/// Moustapha Cissé, David Grangier, and Hervé Jégou.
/// See
/// https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveLogSoftmaxWithLoss
/// to learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AdaptiveLogSoftmaxWithLossOptions`
/// class to learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10,
/// {4, 8}).div_value(2.).head_bias(true));
/// ```
class TORCH_API AdaptiveLogSoftmaxWithLossImpl
    : public Cloneable<AdaptiveLogSoftmaxWithLossImpl> {
 public:
  AdaptiveLogSoftmaxWithLossImpl(
      int64_t in_features,
      int64_t n_classes,
      std::vector<int64_t> cutoffs)
      : AdaptiveLogSoftmaxWithLossImpl(AdaptiveLogSoftmaxWithLossOptions(
            in_features,
            n_classes,
            std::move(cutoffs))) {}

  explicit AdaptiveLogSoftmaxWithLossImpl(
      AdaptiveLogSoftmaxWithLossOptions options_);

  ASMoutput forward(const Tensor& input, const Tensor& target);

  void reset() override;

  void reset_parameters();

  /// Pretty prints the `AdaptiveLogSoftmaxWithLoss` module into the given
  /// `stream`.
  void pretty_print(std::ostream& stream) const override;

  /// Given the input tensor and the output of `head`, computes the log of the
  /// full distribution
  Tensor _get_full_log_prob(const Tensor& input, const Tensor& head_output);

  /// Computes log probabilities for all n_classes
  Tensor log_prob(const Tensor& input);

  /// This is equivalent to `log_prob(input).argmax(1)` but is more efficient
  /// in some cases
  Tensor predict(const Tensor& input);

  /// The options with which this `Module` was constructed
  AdaptiveLogSoftmaxWithLossOptions options;

  /// Cutoffs used to assign targets to their buckets. It should be an ordered
  /// sequence of integers sorted in increasing order
  std::vector<int64_t> cutoffs;

  int64_t shortlist_size;

  /// Number of clusters
  int64_t n_clusters;

  /// Output size of head classifier
  int64_t head_size;

  Linear head = nullptr;

  ModuleList tail;
};

/// A `ModuleHolder` subclass for `AdaptiveLogSoftmaxWithLossImpl`.
/// See the documentation for `AdaptiveLogSoftmaxWithLossImpl` class to learn
/// what methods it provides, and examples of how to use
/// `AdaptiveLogSoftmaxWithLoss` with
/// `torch::nn::AdaptiveLogSoftmaxWithLossOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(AdaptiveLogSoftmaxWithLoss);

} // namespace torch::nn
```
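A short usage sketch for `AdaptiveLogSoftmaxWithLoss` (illustrative, not from the header; the feature count, class count, and cutoffs follow the `Example:` comment above):

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  // 8 input features, 10 classes, cluster cutoffs at {4, 8}.
  torch::nn::AdaptiveLogSoftmaxWithLoss asm_head(
      torch::nn::AdaptiveLogSoftmaxWithLossOptions(8, 10, {4, 8}));

  auto input = torch::randn({4, 8});
  auto target = torch::randint(0, 10, {4}, torch::kLong);

  // forward() returns an ASMoutput {output, loss}.
  auto result = asm_head(input, target);
  std::cout << "loss: " << result.loss << '\n';

  // Full log-probabilities over all 10 classes, and the argmax shortcut.
  auto log_probs = asm_head->log_prob(input);  // shape (4, 10)
  auto prediction = asm_head->predict(input);  // shape (4,)
  std::cout << prediction << '\n';
}
```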
=========================================================================================================================================================
SOURCE CODE FILE: batchnorm.h LINES: 1 SIZE: 8.27 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\batchnorm.h ENCODING: utf-8
```h
#pragma once

#include <torch/nn/cloneable.h>
#include <torch/nn/functional/batchnorm.h>
#include <torch/nn/init.h>
#include <torch/nn/options/batchnorm.h>
#include <torch/nn/pimpl.h>
#include <torch/types.h>

namespace torch::nn {

/// Base class for all (dimension-specialized) batchnorm and instancenorm
/// modules.
template <size_t D, typename Derived, typename DerivedOptions>
class NormImplBase : public torch::nn::Cloneable<Derived> {
 protected:
  virtual void _check_input_dim(const Tensor& input) = 0;

 public:
  NormImplBase(const DerivedOptions& options_) : options(options_) {
    NormImplBase::reset();
  }

  void reset() override {
    if (options.affine()) {
      weight = this->register_parameter(
          "weight", torch::empty({options.num_features()}));
      bias = this->register_parameter(
          "bias", torch::empty({options.num_features()}));
    } else {
      weight = this->register_parameter(
          "weight", Tensor(), /*requires_grad=*/false);
      bias = this->register_parameter("bias", Tensor(), /*requires_grad=*/false);
    }
    if (options.track_running_stats()) {
      running_mean = this->register_buffer(
          "running_mean", torch::zeros({options.num_features()}));
      running_var = this->register_buffer(
          "running_var", torch::ones({options.num_features()}));
      num_batches_tracked = this->register_buffer(
          "num_batches_tracked", torch::tensor(0, torch::dtype(torch::kLong)));
    } else {
      running_mean = this->register_buffer("running_mean", Tensor());
      running_var = this->register_buffer("running_var", Tensor());
      num_batches_tracked =
          this->register_buffer("num_batches_tracked", Tensor());
    }
    reset_parameters();
  }

  void reset_running_stats() {
    if (options.track_running_stats()) {
      running_mean.zero_();
      running_var.fill_(1);
      num_batches_tracked.zero_();
    }
  }

  void reset_parameters() {
    reset_running_stats();
    if (options.affine()) {
      torch::nn::init::ones_(weight);
      torch::nn::init::zeros_(bias);
    }
  }

  /// The options with which this module was constructed.
  DerivedOptions options;

  /// The learned weight.
  /// Only defined if the `affine` option was `true` upon construction.
  Tensor weight;

  /// The learned bias.
  /// Only defined if the `affine` option was `true` upon construction.
  Tensor bias;

  /// The running mean.
  /// Only defined if the `track_running_stats` option was `true` upon
  /// construction.
  Tensor running_mean;

  /// The running variance.
  /// Only defined if the `track_running_stats` option was `true` upon
  /// construction.
  Tensor running_var;

  /// The number of forward calls made so far.
  /// Only defined if the `track_running_stats` option was `true` upon
  /// construction.
  Tensor num_batches_tracked;
};

/// Base class for all (dimension-specialized) batchnorm modules.
template <size_t D, typename Derived> class BatchNormImplBase : public NormImplBase<D, Derived, BatchNormOptions> { public: using NormImplBase<D, Derived, BatchNormOptions>::NormImplBase; Tensor forward(const Tensor& input) { this->_check_input_dim(input); double exponential_average_factor = 0.0; if (this->options.momentum().has_value()) { exponential_average_factor = this->options.momentum().value(); } if (this->is_training() && this->options.track_running_stats()) { if (this->num_batches_tracked.defined()) { this->num_batches_tracked += 1; if (this->options.momentum() == std::nullopt) { // use cumulative moving average exponential_average_factor = 1.0 / this->num_batches_tracked.template item<double>(); } else { // use exponential moving average exponential_average_factor = this->options.momentum().value(); } } } return torch::nn::functional::detail::batch_norm( input, this->running_mean, this->running_var, this->weight, this->bias, this->is_training() || !this->options.track_running_stats(), /*momentum=*/exponential_average_factor, this->options.eps()); } /// Pretty prints the `BatchNorm{1,2,3}d` module into the given `stream`. void pretty_print(std::ostream& stream) const override { stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d(" << this->options.num_features() << ", " << "eps=" << this->options.eps() << ", " << "momentum="; if (this->options.momentum().has_value()) { stream << this->options.momentum().value(); } else { stream << "None"; } stream << ", " << "affine=" << this->options.affine() << ", " << "track_running_stats=" << this->options.track_running_stats() << ")"; } }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm1d // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the BatchNorm1d function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.BatchNorm1d to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::BatchNorm1dOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// BatchNorm1d /// model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); /// ``` class TORCH_API BatchNorm1dImpl : public BatchNormImplBase<1, BatchNorm1dImpl> { protected: void _check_input_dim(const Tensor& input) override; public: using BatchNormImplBase<1, BatchNorm1dImpl>::BatchNormImplBase; }; /// A `ModuleHolder` subclass for `BatchNorm1dImpl`. /// See the documentation for `BatchNorm1dImpl` class to learn what methods it /// provides, and examples of how to use `BatchNorm1d` with /// `torch::nn::BatchNorm1dOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(BatchNorm1d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm2d // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the BatchNorm2d function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.BatchNorm2d to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::BatchNorm2dOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// BatchNorm2d /// model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); /// ``` class TORCH_API BatchNorm2dImpl : public BatchNormImplBase<2, BatchNorm2dImpl> { protected: void _check_input_dim(const Tensor& input) override; public: using BatchNormImplBase<2, BatchNorm2dImpl>::BatchNormImplBase; }; /// A `ModuleHolder` subclass for `BatchNorm2dImpl`. 
/// See the documentation for `BatchNorm2dImpl` class to learn what methods it /// provides, and examples of how to use `BatchNorm2d` with /// `torch::nn::BatchNorm2dOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(BatchNorm2d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm3d // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the BatchNorm3d function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.BatchNorm3d to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::BatchNorm3dOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// BatchNorm3d /// model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); /// ``` class TORCH_API BatchNorm3dImpl : public BatchNormImplBase<3, BatchNorm3dImpl> { protected: void _check_input_dim(const Tensor& input) override; public: using BatchNormImplBase<3, BatchNorm3dImpl>::BatchNormImplBase; }; /// A `ModuleHolder` subclass for `BatchNorm3dImpl`. /// See the documentation for `BatchNorm3dImpl` class to learn what methods it /// provides, and examples of how to use `BatchNorm3d` with /// `torch::nn::BatchNorm3dOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(BatchNorm3d); } // namespace torch::nn ```
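A small sketch of the train/eval behavior implemented in `BatchNormImplBase::forward` above (illustrative only; assumes a linked libtorch):

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  torch::nn::BatchNorm1d bn(torch::nn::BatchNorm1dOptions(4).momentum(0.1));

  auto x = torch::randn({8, 4});

  bn->train();  // batch statistics are used; running stats are updated
  bn(x);
  std::cout << bn->num_batches_tracked.item<int64_t>() << '\n';  // 1

  bn->eval();  // the accumulated running_mean / running_var are used
  auto y = bn(x);
  std::cout << bn->running_mean << '\n';

  // Constructing with .momentum(std::nullopt) instead would select the
  // cumulative-moving-average branch of BatchNormImplBase::forward.
}
```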
====================================================================================================================================================== SOURCE CODE FILE: common.h LINES: 1 SIZE: 4.34 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\common.h ENCODING: utf-8 ```h #pragma once /// This macro enables a module with default arguments in its forward method /// to be used in a Sequential module. /// /// Example usage: /// /// Let's say we have a module declared like this: /// ``` /// struct MImpl : torch::nn::Module { /// public: /// explicit MImpl(int value_) : value(value_) {} /// torch::Tensor forward(int a, int b = 2, double c = 3.0) { /// return torch::tensor(a + b + c); /// } /// private: /// int value; /// }; /// TORCH_MODULE(M); /// ``` /// /// If we try to use it in a Sequential module and run forward: /// ``` /// torch::nn::Sequential seq(M(1)); /// seq->forward(1); /// ``` /// /// We will receive the following error message: /// ``` /// MImpl's forward() method expects 3 argument(s), but received 1. /// If MImpl's forward() method has default arguments, please make sure /// the forward() method is declared with a corresponding /// `FORWARD_HAS_DEFAULT_ARGS` macro. /// ``` /// /// The right way to fix this error is to use the `FORWARD_HAS_DEFAULT_ARGS` /// macro when declaring the module: /// ``` /// struct MImpl : torch::nn::Module { /// public: /// explicit MImpl(int value_) : value(value_) {} /// torch::Tensor forward(int a, int b = 2, double c = 3.0) { /// return torch::tensor(a + b + c); /// } /// protected: /// /* /// NOTE: looking at the argument list of `forward`: /// `forward(int a, int b = 2, double c = 3.0)` /// we saw the following default arguments: /// ---------------------------------------------------------------- /// 0-based index of default | Default value of arg /// arg in forward arg list | (wrapped by `torch::nn::AnyValue()`) /// ---------------------------------------------------------------- /// 1 | torch::nn::AnyValue(2) /// 2 | torch::nn::AnyValue(3.0) /// ---------------------------------------------------------------- /// Thus we pass the following arguments to the `FORWARD_HAS_DEFAULT_ARGS` /// macro: /// */ /// FORWARD_HAS_DEFAULT_ARGS({1, torch::nn::AnyValue(2)}, {2, /// torch::nn::AnyValue(3.0)}) /// private: /// int value; /// }; /// TORCH_MODULE(M); /// ``` /// Now, running the following would work: /// ``` /// torch::nn::Sequential seq(M(1)); /// seq->forward(1); // This correctly populates the default arguments for /// `MImpl::forward` /// ``` #define FORWARD_HAS_DEFAULT_ARGS(...) \ template <typename ModuleType, typename... 
ArgumentTypes> \ friend struct torch::nn::AnyModuleHolder; \ bool _forward_has_default_args() override { \ return true; \ } \ unsigned int _forward_num_required_args() override { \ std::vector<std::pair<unsigned int, torch::nn::AnyValue>> args_info{ \ __VA_ARGS__}; \ return std::begin(args_info)->first; \ } \ std::vector<torch::nn::AnyValue> _forward_populate_default_args( \ std::vector<torch::nn::AnyValue>&& arguments) override { \ std::vector<std::pair<unsigned int, torch::nn::AnyValue>> args_info{ \ __VA_ARGS__}; \ unsigned int num_all_args = std::rbegin(args_info)->first + 1; \ TORCH_INTERNAL_ASSERT( \ arguments.size() >= _forward_num_required_args() && \ arguments.size() <= num_all_args); \ std::vector<torch::nn::AnyValue> ret = std::move(arguments); \ ret.reserve(num_all_args); \ for (auto& arg_info : args_info) { \ if (arg_info.first > ret.size() - 1) \ ret.emplace_back(std::move(arg_info.second)); \ } \ return ret; \ } ```
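The `MImpl` example from the comment above, assembled into a runnable form (slightly simplified: the unused `value` member is dropped so that `M` is default-constructible):

```cpp
#include <torch/torch.h>

#include <iostream>

struct MImpl : torch::nn::Module {
  torch::Tensor forward(int a, int b = 2, double c = 3.0) {
    return torch::tensor(a + b + c);
  }

 protected:
  // Defaults begin at 0-based argument index 1 (b = 2) and 2 (c = 3.0).
  FORWARD_HAS_DEFAULT_ARGS(
      {1, torch::nn::AnyValue(2)},
      {2, torch::nn::AnyValue(3.0)})
};
TORCH_MODULE(M);

int main() {
  torch::nn::Sequential seq(M{});
  std::cout << seq->forward(1) << '\n';  // 1 + 2 + 3.0 -> tensor(6.)
}
```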
============================================================================================================================================================= SOURCE CODE FILE: any.h LINES: 1 SIZE: 13.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\any.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/module.h> #include <torch/nn/modules/container/any_module_holder.h> #include <torch/types.h> #include <memory> #include <type_traits> #include <utility> #include <vector> namespace torch::nn { /// Stores a type erased `Module`. /// /// The PyTorch C++ API does not impose an interface on the signature of /// `forward()` in `Module` subclasses. This gives you complete freedom to /// design your `forward()` methods to your liking. However, this also means /// there is no unified base type you could store in order to call `forward()` /// polymorphically for any module. This is where the `AnyModule` comes in. /// Instead of inheritance, it relies on type erasure for polymorphism. /// /// An `AnyModule` can store any `nn::Module` subclass that provides a /// `forward()` method. This `forward()` may accept any types and return any /// type. Once stored in an `AnyModule`, you can invoke the underlying module's /// `forward()` by calling `AnyModule::forward()` with the arguments you would /// supply to the stored module (though see one important limitation below). /// Example: /// /// \rst /// .. code-block:: cpp /// /// struct GenericTrainer { /// torch::nn::AnyModule module; /// /// void train(torch::Tensor input) { /// module.forward(input); /// } /// }; /// /// GenericTrainer trainer1{torch::nn::Linear(3, 4)}; /// GenericTrainer trainer2{torch::nn::Conv2d(3, 4, 2)}; /// \endrst /// /// As `AnyModule` erases the static type of the stored module (and its /// `forward()` method) to achieve polymorphism, type checking of arguments is /// moved to runtime. That is, passing an argument with an incorrect type to an /// `AnyModule` will compile, but throw an exception at runtime: /// /// \rst /// .. code-block:: cpp /// /// torch::nn::AnyModule module(torch::nn::Linear(3, 4)); /// // Linear takes a tensor as input, but we are passing an integer. /// // This will compile, but throw a `torch::Error` exception at runtime. /// module.forward(123); /// \endrst /// /// \rst /// .. attention:: /// One noteworthy limitation of `AnyModule` is that its `forward()` method /// does not support implicit conversion of argument types. For example, if /// the stored module's `forward()` method accepts a `float` and you call /// `any_module.forward(3.4)` (where `3.4` is a `double`), this will throw /// an exception. /// \endrst /// /// The return type of the `AnyModule`'s `forward()` method is controlled via /// the first template argument to `AnyModule::forward()`. It defaults to /// `torch::Tensor`. To change it, you can write `any_module.forward<int>()`, /// for example. /// /// \rst /// .. code-block:: cpp /// /// torch::nn::AnyModule module(torch::nn::Linear(3, 4)); /// auto output = module.forward(torch::ones({2, 3})); /// /// struct IntModule { /// int forward(int x) { return x; } /// }; /// torch::nn::AnyModule module(IntModule{}); /// int output = module.forward<int>(5); /// \endrst /// /// The only other method an `AnyModule` provides access to on the stored /// module is `clone()`. However, you may acquire a handle on the module via /// `.ptr()`, which returns a `shared_ptr<nn::Module>`. 
Further, if you know /// the concrete type of the stored module, you can get a concrete handle to it /// using `.get<T>()` where `T` is the concrete module type. /// /// \rst /// .. code-block:: cpp /// /// torch::nn::AnyModule module(torch::nn::Linear(3, 4)); /// std::shared_ptr<nn::Module> ptr = module.ptr(); /// torch::nn::Linear linear(module.get<torch::nn::Linear>()); /// \endrst class AnyModule { public: /// A default-constructed `AnyModule` is in an empty state. AnyModule() = default; /// Constructs an `AnyModule` from a `shared_ptr` to concrete module object. template <typename ModuleType> explicit AnyModule(std::shared_ptr<ModuleType> module); /// Constructs an `AnyModule` from a concrete module object. template < typename ModuleType, typename = torch::detail::enable_if_module_t<ModuleType>> explicit AnyModule(ModuleType&& module); /// Constructs an `AnyModule` from a module holder. template <typename ModuleType> explicit AnyModule(const ModuleHolder<ModuleType>& module_holder); /// Move construction and assignment is allowed, and follows the default /// behavior of move for `std::unique_ptr`. AnyModule(AnyModule&&) = default; AnyModule& operator=(AnyModule&&) = default; /// Creates a shallow copy of an `AnyModule`. AnyModule(const AnyModule& other); AnyModule& operator=(const AnyModule& other); /// Creates a deep copy of an `AnyModule` if it contains a module, else an /// empty `AnyModule` if it is empty. AnyModule clone(std::optional<Device> device = std::nullopt) const; /// Assigns a module to the `AnyModule` (to circumvent the explicit /// constructor). template <typename ModuleType> AnyModule& operator=(std::shared_ptr<ModuleType> module); /// Invokes `forward()` on the contained module with the given arguments, and /// returns the return value as an `AnyValue`. Use this method when chaining /// `AnyModule`s in a loop. template <typename... ArgumentTypes> AnyValue any_forward(ArgumentTypes&&... arguments); /// Invokes `forward()` on the contained module with the given arguments, and /// casts the returned `AnyValue` to the supplied `ReturnType` (which defaults /// to `torch::Tensor`). template <typename ReturnType = torch::Tensor, typename... ArgumentTypes> ReturnType forward(ArgumentTypes&&... arguments); /// Attempts to cast the underlying module to the given module type. Throws an /// exception if the types do not match. template <typename T, typename = torch::detail::enable_if_module_t<T>> T& get(); /// Attempts to cast the underlying module to the given module type. Throws an /// exception if the types do not match. template <typename T, typename = torch::detail::enable_if_module_t<T>> const T& get() const; /// Returns the contained module in a `nn::ModuleHolder` subclass if possible /// (i.e. if `T` has a constructor for the underlying module type). template <typename T, typename ContainedType = typename T::ContainedType> T get() const; /// Returns a `std::shared_ptr` whose dynamic type is that of the underlying /// module. std::shared_ptr<Module> ptr() const; /// Like `ptr()`, but casts the pointer to the given type. template <typename T, typename = torch::detail::enable_if_module_t<T>> std::shared_ptr<T> ptr() const; /// Returns the `type_info` object of the contained value. const std::type_info& type_info() const; /// Returns true if the `AnyModule` does not contain a module. bool is_empty() const noexcept; private: /// Creates a `unique_ptr<AnyModulePlaceholder>` pointing to a /// `AnyModuleHolder` of the correct type. 
This method is used to deduce the /// arguments of the module's `forward()` method. template < typename ModuleType, typename Class, typename ReturnType, typename... ArgumentTypes> std::unique_ptr<AnyModulePlaceholder> make_holder( std::shared_ptr<ModuleType>&& module, ReturnType (Class::*)(ArgumentTypes...)); /// Helper method invoked by const and non-const `get()`. template <typename ModuleType, typename ReturnType, typename... ArgumentTypes> ModuleType& get_(ReturnType (ModuleType::*)(ArgumentTypes...)) const; /// Helper method invoked by const and non-const `get()`. template <typename ModuleType> ModuleType& get_() const; /// The type erased module. std::unique_ptr<AnyModulePlaceholder> content_; }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModule ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename ModuleType> AnyModule::AnyModule(std::shared_ptr<ModuleType> module) : content_(make_holder( std::move(module), &std::remove_reference_t<ModuleType>::forward)) { // `AnyModule` can only store an `nn::Module` subclass object that provides // a `forward()` method that has a non-templatized return type. // (e.g. `AnyModule` cannot store `nn::Sequential`, because `nn::Sequential`'s // `forward()` method has a templatized return type.) static_assert( torch::detail::is_module<ModuleType>::value, "Can only store object derived from nn::Module into AnyModule"); static_assert( torch::detail::has_forward<ModuleType>::value, "Can only store module with a forward() method that has a non-templatized" " argument type and return type into AnyModule (e.g. we cannot store nn::Sequential" "into AnyModule, because its forward() method's argument type and return type are templatized." " If you need to use nn::Sequentials inside each other you can subclass " "nn::Sequential and write a non-templatized forward function for it. You can checkout " "https://github.com/pytorch/vision/blob/2f46070f3cb1ea894d82578f3dc5677f82f34958/torchvision/csrc/models/mnasnet.cpp#L59 " "for an example on how to do this.)."); } template <typename ModuleType, typename> AnyModule::AnyModule(ModuleType&& module) : AnyModule( std::make_shared<ModuleType>(std::forward<ModuleType>(module))) {} template <typename ModuleType> AnyModule::AnyModule(const ModuleHolder<ModuleType>& module_holder) : AnyModule(module_holder.ptr()) {} inline AnyModule::AnyModule(const AnyModule& other) : content_(other.content_ ? other.content_->copy() : nullptr) {} inline AnyModule& AnyModule::operator=(const AnyModule& other) { if (this != &other) { content_ = other.content_ ? other.content_->copy() : nullptr; } return *this; } inline AnyModule AnyModule::clone(std::optional<Device> device) const { AnyModule clone; clone.content_ = content_ ? content_->clone_module(device) : nullptr; return clone; } template <typename ModuleType> AnyModule& AnyModule::operator=(std::shared_ptr<ModuleType> module) { *this = AnyModule(std::move(module)); return *this; } template <typename... ArgumentTypes> AnyValue AnyModule::any_forward(ArgumentTypes&&... arguments) { TORCH_CHECK(!is_empty(), "Cannot call forward() on an empty AnyModule"); std::vector<AnyValue> values; values.reserve(sizeof...(ArgumentTypes)); torch::apply( [&values](AnyValue&& value) { values.push_back(std::move(value)); }, AnyValue(std::forward<ArgumentTypes>(arguments))...); return content_->forward(std::move(values)); } template <typename ReturnType, typename... ArgumentTypes> ReturnType AnyModule::forward(ArgumentTypes&&... arguments) { return any_forward(std::forward<ArgumentTypes>(arguments)...) 
.template get<ReturnType>(); } template <typename T, typename> T& AnyModule::get() { TORCH_CHECK(!is_empty(), "Cannot call get() on an empty AnyModule"); return get_<T>(); } template <typename T, typename> const T& AnyModule::get() const { TORCH_CHECK(!is_empty(), "Cannot call get() on an empty AnyModule"); return get_<T>(); } template <typename T, typename ContainedType> T AnyModule::get() const { return T(ptr<ContainedType>()); } inline std::shared_ptr<Module> AnyModule::ptr() const { TORCH_CHECK(!is_empty(), "Cannot call ptr() on an empty AnyModule"); return content_->ptr(); } template <typename T, typename> std::shared_ptr<T> AnyModule::ptr() const { TORCH_CHECK(!is_empty(), "Cannot call ptr() on an empty AnyModule"); // Call get() but discard the value, just to do the type checking. get_<T>(); return std::dynamic_pointer_cast<T>(ptr()); } inline const std::type_info& AnyModule::type_info() const { TORCH_CHECK(!is_empty(), "Cannot call type_info() on an empty AnyModule"); return content_->type_info; } inline bool AnyModule::is_empty() const noexcept { return content_ == nullptr; } // Private Methods template < typename ModuleType, typename Class, typename ReturnType, typename... ArgumentTypes> std::unique_ptr<AnyModulePlaceholder> AnyModule::make_holder( std::shared_ptr<ModuleType>&& module, ReturnType (Class::*)(ArgumentTypes...)) { static_assert( torch::detail::check_not_lvalue_references<ArgumentTypes...>(), "Modules stored inside AnyModule must not take references. " "Use pointers instead."); static_assert( !std::is_void_v<ReturnType>, "AnyModule cannot store modules that return void " "(you can return a dummy value)."); return std::make_unique< AnyModuleHolder<std::decay_t<ModuleType>, ArgumentTypes...>>( std::move(module)); } template <typename ModuleType> ModuleType& AnyModule::get_() const { using M = std::remove_reference_t<ModuleType>; static_assert( torch::detail::has_forward<M>::value, "Can only call AnyModule::get<T> with a type T that has a forward method"); return get_(&M::forward); } template <typename ModuleType, typename ReturnType, typename... ArgumentTypes> ModuleType& AnyModule::get_( ReturnType (ModuleType::*)(ArgumentTypes...)) const { if (typeid(ModuleType).hash_code() == type_info().hash_code()) { return *static_cast<AnyModuleHolder<ModuleType, ArgumentTypes...>&>( *content_) .module; } TORCH_CHECK( false, "Attempted to cast module of type ", c10::demangle(type_info().name()), " to type ", c10::demangle(typeid(ModuleType).name())); } } // namespace torch::nn ```
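A brief sketch of type-erased storage with `AnyModule` (illustrative; mirrors the doc comment at the top of this header):

```cpp
#include <torch/torch.h>

#include <iostream>
#include <vector>

int main() {
  // Heterogeneous modules behind one type-erased interface.
  std::vector<torch::nn::AnyModule> layers;
  layers.emplace_back(torch::nn::Linear(3, 4));
  layers.emplace_back(torch::nn::Functional(torch::relu));

  auto x = torch::randn({2, 3});
  for (auto& layer : layers) {
    x = layer.forward(x);  // the default return type is torch::Tensor
  }
  std::cout << x.sizes() << '\n';  // [2, 4]

  // Recover a concrete handle when the stored type is known.
  torch::nn::Linear linear = layers[0].get<torch::nn::Linear>();
}
```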
===========================================================================================================================================================================
SOURCE CODE FILE: any_module_holder.h LINES: 1 SIZE: 5.01 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\any_module_holder.h ENCODING: utf-8
```h
#pragma once

#include <torch/csrc/utils/variadic.h>
#include <torch/nn/modules/container/any_value.h>

namespace torch::nn {

class Module;

// ~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModulePlaceholder ~~~~~~~~~~~~~~~~~~~~~~~~~~

/// The static type of the object we store in the `AnyModule`, which erases
/// the actual type, but allows us to call `forward()` on the underlying
/// module.
struct AnyModulePlaceholder : public AnyValue::Placeholder {
  using AnyValue::Placeholder::Placeholder;

  /// The "erased" `forward()` method.
  virtual AnyValue forward(std::vector<AnyValue>&& arguments) = 0;

  /// Returns a `std::shared_ptr<Module>` pointing to the erased module.
  virtual std::shared_ptr<Module> ptr() = 0;

  /// Returns an `AnyModulePlaceholder` with a shallow copy of this `AnyModule`.
  virtual std::unique_ptr<AnyModulePlaceholder> copy() const = 0;

  /// Returns an `AnyModulePlaceholder` with a deep copy of this `AnyModule`.
  virtual std::unique_ptr<AnyModulePlaceholder> clone_module(
      std::optional<Device> device) const = 0;
};

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModuleHolder ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// The dynamic type of the object stored in the `AnyModule`. It contains the
/// concrete instance to which all calls are forwarded. It is parameterized
/// over the concrete type of the module, and the types of the arguments the
/// module takes in its `forward()` method.
template <typename ModuleType, typename... ArgumentTypes>
struct AnyModuleHolder : public AnyModulePlaceholder {
  /// \internal
  struct CheckedGetter {
    template <typename T>
    std::decay_t<T>&& operator()(size_t index) {
      AT_ASSERT(index < arguments_.size());
      auto& value = arguments_[index];
      if (auto* maybe_value = value.template try_get<std::decay_t<T>>()) {
        return std::move(*maybe_value);
      }
      TORCH_CHECK(
          false,
          "Expected argument #",
          index,
          " to be of type ",
          c10::demangle(typeid(T).name()),
          ", but received value of type ",
          c10::demangle(value.type_info().name()));
    }
    // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
    std::vector<AnyValue>& arguments_;
  };

  /// \internal
  struct InvokeForward {
    template <typename... Ts>
    AnyValue operator()(Ts&&... ts) {
      return AnyValue(module_->forward(std::forward<Ts>(ts)...));
    }
    // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
    std::shared_ptr<ModuleType>& module_;
  };

  /// Constructs the `AnyModuleHolder` from a concrete module.
  explicit AnyModuleHolder(std::shared_ptr<ModuleType>&& module_)
      : AnyModulePlaceholder(typeid(ModuleType)), module(std::move(module_)) {}

  /// Calls `forward()` on the underlying module, casting each `AnyValue` in
  /// the argument vector to a concrete value.
AnyValue forward(std::vector<AnyValue>&& arguments) override { if (module->_forward_has_default_args()) { TORCH_CHECK( arguments.size() >= module->_forward_num_required_args() && arguments.size() <= sizeof...(ArgumentTypes), c10::demangle(type_info.name()), "'s forward() method expects at least ", module->_forward_num_required_args(), " argument(s) and at most ", sizeof...(ArgumentTypes), " argument(s), but received ", arguments.size(), "."); arguments = std::move( module->_forward_populate_default_args(std::move(arguments))); } else { std::string use_default_args_macro_prompt = " If " + c10::demangle(type_info.name()) + "'s forward() method has default arguments, " + "please make sure the forward() method is declared with a corresponding `FORWARD_HAS_DEFAULT_ARGS` macro."; TORCH_CHECK( arguments.size() == sizeof...(ArgumentTypes), c10::demangle(type_info.name()), "'s forward() method expects ", sizeof...(ArgumentTypes), " argument(s), but received ", arguments.size(), ".", (arguments.size() < sizeof...(ArgumentTypes)) ? use_default_args_macro_prompt : ""); } // FYI: During invocation of a module's `forward()` method, the values live // in the `arguments` vector inside this function. return torch::unpack<AnyValue, ArgumentTypes...>( InvokeForward{module}, CheckedGetter{arguments}); } std::shared_ptr<Module> ptr() override { return module; } std::unique_ptr<AnyModulePlaceholder> copy() const override { return std::make_unique<AnyModuleHolder>(*this); } std::unique_ptr<AnyModulePlaceholder> clone_module( std::optional<Device> device) const override { return std::make_unique<AnyModuleHolder>( std::dynamic_pointer_cast<ModuleType>(module->clone(device))); } /// The actual concrete module instance. std::shared_ptr<ModuleType> module; }; } // namespace torch::nn ```
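The observable effect of the argument checks in `AnyModuleHolder::forward` above, as a sketch (illustrative; the printed error text is abbreviated):

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  torch::nn::AnyModule module(torch::nn::Linear(3, 4));

  // Correct call: exactly one Tensor argument.
  auto y = module.forward(torch::ones({2, 3}));
  std::cout << y.sizes() << '\n';  // [2, 4]

  // A wrong argument type compiles but is rejected at runtime by
  // CheckedGetter, via TORCH_CHECK.
  try {
    module.forward(123);
  } catch (const c10::Error& e) {
    std::cout << "rejected: " << e.what_without_backtrace() << '\n';
  }
}
```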
=================================================================================================================================================================== SOURCE CODE FILE: any_value.h LINES: 1 SIZE: 4.16 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\any_value.h ENCODING: utf-8 ```h #pragma once #include <torch/types.h> #include <memory> #include <type_traits> #include <typeinfo> #include <utility> namespace torch::nn { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyValue ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// An implementation of `std::any` which stores /// a type erased object, whose concrete value can be retrieved at runtime by /// checking if the `typeid()` of a requested type matches the `typeid()` of /// the object stored. class AnyValue { public: /// Move construction and assignment is allowed, and follows the default /// behavior of move for `std::unique_ptr`. AnyValue(AnyValue&&) = default; AnyValue& operator=(AnyValue&&) = default; ~AnyValue() = default; /// Copy construction and assignment is allowed. AnyValue(const AnyValue& other) : content_(other.content_->clone()) {} AnyValue& operator=(const AnyValue& other) { content_ = other.content_->clone(); return *this; } /// Constructs the `AnyValue` from value type. template < typename T, typename = std::enable_if_t<!std::is_same_v<T, AnyValue>>> explicit AnyValue(T&& value) : content_( std::make_unique<Holder<std::decay_t<T>>>(std::forward<T>(value))) { } /// Returns a pointer to the value contained in the `AnyValue` if the type /// passed as template parameter matches the type of the value stored, and /// returns a null pointer otherwise. template <typename T> T* try_get() { static_assert( !std::is_reference_v<T>, "AnyValue stores decayed types, you cannot cast it to a reference type"); static_assert( !std::is_array_v<T>, "AnyValue stores decayed types, you must cast it to T* instead of T[]"); if (typeid(T).hash_code() == type_info().hash_code()) { return &static_cast<Holder<T>&>(*content_).value; } return nullptr; } /// Returns the value contained in the `AnyValue` if the type passed as /// template parameter matches the type of the value stored, and throws an /// exception otherwise. template <typename T> T get() { if (auto* maybe_value = try_get<T>()) { return *maybe_value; } TORCH_CHECK( false, "Attempted to cast AnyValue to ", c10::demangle(typeid(T).name()), ", but its actual type is ", c10::demangle(type_info().name())); } /// Returns the `type_info` object of the contained value. const std::type_info& type_info() const noexcept { return content_->type_info; } private: friend struct AnyModulePlaceholder; friend struct TestAnyValue; /// \internal /// The static type of the object we store in the `AnyValue`, which erases the /// actual object's type, allowing us only to check the `type_info` of the /// type stored in the dynamic type. 
struct Placeholder { explicit Placeholder(const std::type_info& type_info_) noexcept : type_info(type_info_) {} Placeholder(const Placeholder&) = default; Placeholder(Placeholder&&) = default; Placeholder& operator=(const Placeholder&) = delete; Placeholder& operator=(Placeholder&&) = delete; virtual ~Placeholder() = default; virtual std::unique_ptr<Placeholder> clone() const { TORCH_CHECK(false, "clone() should only be called on `AnyValue::Holder`"); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) const std::type_info& type_info; }; /// \internal /// The dynamic type of the object we store in the `AnyValue`, which hides the /// actual object we have erased in this `AnyValue`. template <typename T> struct Holder : public Placeholder { /// A template because T&& would not be universal reference here. template < typename U, typename = std::enable_if_t<!std::is_same_v<U, Holder>>> explicit Holder(U&& value_) noexcept : Placeholder(typeid(T)), value(std::forward<U>(value_)) {} std::unique_ptr<Placeholder> clone() const override { return std::make_unique<Holder<T>>(value); } T value; }; /// The type erased object. std::unique_ptr<Placeholder> content_; }; } // namespace torch::nn ```
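A small sketch of the `try_get`/`get` contract described above (illustrative):

```cpp
#include <torch/torch.h>

#include <cassert>
#include <iostream>

int main() {
  torch::nn::AnyValue value(42);

  // try_get() returns nullptr on a type mismatch instead of throwing.
  assert(value.try_get<double>() == nullptr);
  if (auto* i = value.try_get<int>()) {
    std::cout << *i << '\n';  // 42
  }

  // get() returns the value by copy, or throws a c10::Error on mismatch.
  int i = value.get<int>();
  std::cout << i << '\n';
}
```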
==================================================================================================================================================================== SOURCE CODE FILE: functional.h LINES: 1 SIZE: 3.36 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\functional.h ENCODING: utf-8 ```h #pragma once #include <torch/csrc/Export.h> #include <torch/nn/cloneable.h> #include <torch/types.h> #include <functional> #include <utility> namespace torch::nn { /// Wraps a function in a `Module`. /// /// The `Functional` module allows wrapping an arbitrary function or function /// object in an `nn::Module`. This is primarily handy for usage in /// `Sequential`. /// /// \rst /// .. code-block:: cpp /// /// Sequential sequential( /// Linear(3, 4), /// Functional(torch::relu), /// BatchNorm1d(3), /// Functional(torch::elu, /*alpha=*/1)); /// \endrst /// /// While a `Functional` module only accepts a single `Tensor` as input, it is /// possible for the wrapped function to accept further arguments. However, /// these have to be bound *at construction time*. For example, if /// you want to wrap `torch::leaky_relu`, which accepts a `slope` scalar as its /// second argument, with a particular value for its `slope` in a `Functional` /// module, you could write /// /// \rst /// .. code-block:: cpp /// /// Functional(torch::leaky_relu, /*slope=*/0.5) /// \endrst /// /// The value of `0.5` is then stored within the `Functional` object and /// supplied to the function call at invocation time. Note that such bound /// values are evaluated eagerly and stored a single time. See the documentation /// of [std::bind](https://en.cppreference.com/w/cpp/utility/functional/bind) /// for more information on the semantics of argument binding. /// /// \rst /// .. attention:: /// After passing any bound arguments, the function must accept a single /// tensor and return a single tensor. /// \endrst /// /// Note that `Functional` overloads the call operator (`operator()`) such that /// you can invoke it with `my_func(...)`. class TORCH_API FunctionalImpl : public torch::nn::Cloneable<FunctionalImpl> { public: using Function = std::function<Tensor(Tensor)>; /// Constructs a `Functional` from a function object. explicit FunctionalImpl(Function function); template < typename SomeFunction, typename... Args, typename = std::enable_if_t<(sizeof...(Args) > 0)>> explicit FunctionalImpl(SomeFunction original_function, Args&&... args) // NOLINTNEXTLINE(modernize-avoid-bind) : function_(std::bind( original_function, /*input=*/std::placeholders::_1, std::forward<Args>(args)...)) { // std::bind is normally evil, but (1) gcc is broken w.r.t. handling // parameter pack expansion in lambdas and (2) moving parameter packs into // a lambda only works with C++14, so std::bind is the more move-aware // solution here. } void reset() override; /// Pretty prints the `Functional` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// Forwards the `input` tensor to the underlying (bound) function object. Tensor forward(Tensor input); /// Calls forward(input). Tensor operator()(Tensor input); bool is_serializable() const override; private: Function function_; }; /// A `ModuleHolder` subclass for `FunctionalImpl`. /// See the documentation for `FunctionalImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(Functional); } // namespace torch::nn ```
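A sketch of `Functional` inside `Sequential`, including an argument bound at construction time (mirrors the doc comment in this header):

```cpp
#include <torch/torch.h>

#include <iostream>

int main() {
  torch::nn::Sequential model(
      torch::nn::Linear(3, 4),
      torch::nn::Functional(torch::relu),
      // The extra 0.5 is bound once, at construction time.
      torch::nn::Functional(torch::leaky_relu, /*negative_slope=*/0.5));

  auto y = model->forward(torch::randn({2, 3}));
  std::cout << y.sizes() << '\n';  // [2, 4]
}
```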
==================================================================================================================================================================== SOURCE CODE FILE: moduledict.h LINES: 1 SIZE: 8.48 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\moduledict.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/cloneable.h> #include <torch/nn/module.h> #include <torch/ordered_dict.h> #include <vector> namespace torch::nn { /// An OrderedDict of `Module`s that registers its elements by their `key`s. /// /// \rst /// .. code-block:: cpp /// /// torch::OrderedDict<std::string, std::shared_ptr<Module>> ordereddict = { /// {"linear", Linear(10, 3).ptr()}, /// {"conv", Conv2d(1, 2, 3).ptr()}, /// {"dropout", Dropout(0.5).ptr()}, /// }; /// torch::nn::ModuleDict dict1(ordereddict); /// /// for (const auto &module : *dict1) { /// module->pretty_print(std::cout); /// } /// /// std::vector<std::pair<std::string, std::shared_ptr<Module>>> list = { /// {"linear", Linear(10, 3).ptr()}, /// {"conv", Conv2d(1, 2, 3).ptr()}, /// {"dropout", Dropout(0.5).ptr()}, /// }; /// torch::nn::ModuleDict dict2(list); /// /// for (const auto &module : *dict2) { /// module->pretty_print(std::cout); /// } /// /// \endrst /// /// Why should you use `ModuleDict` instead of a simple `map` or `OrderedDict`? /// The value a `ModuleDict` provides over manually calling an ordered map of /// modules is that it allows treating the whole container *as a single module*, /// such that performing a transformation on the `ModuleDict` applies to each of /// the modules it stores (which are each a registered submodule of the /// `ModuleDict`). For example, calling `.to(torch::kCUDA)` on a `ModuleDict` /// will move each module in the map to CUDA memory. For example: /// /// \rst /// .. code-block:: cpp /// /// torch::OrderedDict<std::string, std::shared_ptr<Module>> ordereddict = { /// {"linear", Linear(10, 3).ptr()}, /// {"conv", Conv2d(1, 2, 3).ptr()}, /// {"dropout", Dropout(0.5).ptr()}, /// }; /// torch::nn::ModuleDict dict(ordereddict); /// /// // Convert all modules to CUDA. /// dict->to(torch::kCUDA); /// /// \endrst /// /// Finally, `ModuleDict` provides a lightweight container API, such as allowing /// iteration over submodules, positional access, adding new modules from a /// vector of key-module pairs or an `OrderedDict` or another `ModuleDict` after /// construction via `update`. class ModuleDictImpl : public Cloneable<ModuleDictImpl> { public: using Iterator = torch::OrderedDict<std::string, std::shared_ptr<Module>>::Iterator; using ConstIterator = torch::OrderedDict<std::string, std::shared_ptr<Module>>::ConstIterator; ModuleDictImpl() = default; /// Constructs the `ModuleDict` from a list of string-Module pairs. explicit ModuleDictImpl( const std::vector<std::pair<std::string, std::shared_ptr<Module>>>& modules) { update(modules); } /// Constructs the `ModuleDict` from an `OrderedDict`. explicit ModuleDictImpl( const torch::OrderedDict<std::string, std::shared_ptr<Module>>& modules) { update(modules); } /// Return the items in the `ModuleDict`. std::vector<std::pair<std::string, std::shared_ptr<Module>>> items() const { return modules_.pairs(); } /// Return the keys in the `ModuleDict`. std::vector<std::string> keys() const { return modules_.keys(); } /// Return the values in the `ModuleDict`. std::vector<std::shared_ptr<Module>> values() const { return modules_.values(); } /// Return an iterator to the start of `ModuleDict`. 
Iterator begin() { return modules_.begin(); } /// Return a const iterator to the start of `ModuleDict`. ConstIterator begin() const { return modules_.begin(); } /// Return an iterator to the end of `ModuleDict`. Iterator end() { return modules_.end(); } /// Return a const iterator to the end of `ModuleDict`. ConstIterator end() const { return modules_.end(); } /// Return the number of items currently stored in the `ModuleDict`. size_t size() const noexcept { return modules_.size(); } /// Return true if the `ModuleDict` is empty, otherwise return false. bool empty() const noexcept { return modules_.is_empty(); } /// Check whether a module with the given key exists in the `ModuleDict`. bool contains(const std::string& key) const noexcept { return modules_.contains(key); } /// Remove all items from the `ModuleDict`. void clear() { // Do not remove the registration of modules, to stay consistent with the // Python version. modules_.clear(); } /// Special cloning function for `ModuleDict` because it does not use /// `reset()`. std::shared_ptr<Module> clone( const std::optional<Device>& device = std::nullopt) const override { auto clone = std::make_shared<ModuleDictImpl>(); for (const auto& module : modules_) { clone->insert(module.key(), module.value()->clone(device)); } return clone; } /// `reset()` is empty for `ModuleDict`, since it does not have parameters of /// its own. void reset() override {} /// Pretty prints the `ModuleDict` into the given `stream`. void pretty_print(std::ostream& stream) const override { stream << "torch::nn::ModuleDict"; } /// Attempts to return the `Module` associated with the given `key`. Throws /// an exception if no such `key` is stored in the `ModuleDict`. Check /// contains(key) beforehand for a non-throwing way of access. std::shared_ptr<Module> operator[](const std::string& key) const { return modules_[key]; } /// Attempts to return the module at the given key as the requested type. /// Throws an exception if no such `key` is stored in the `ModuleDict`. /// Check contains(key) beforehand for a non-throwing way of access. template <typename T> T& at(const std::string& key) { static_assert( torch::detail::is_module<T>::value, "Can only call ModuleDict::at with an nn::Module type"); auto module = modules_[key]->as<T>(); TORCH_CHECK( module, "Unable to cast module[", key, "] to ", c10::demangle(typeid(T).name())); return *module; } /// Attempts to return the module at the given key as the requested type. /// Throws an exception if no such `key` is stored in the `ModuleDict`. /// Check contains(key) beforehand for a non-throwing way of access. template <typename T> const T& at(const std::string& key) const { static_assert( torch::detail::is_module<T>::value, "Can only call ModuleDict::at with an nn::Module type"); const auto module = modules_[key]->as<T>(); TORCH_CHECK( module, "Unable to cast module[", key, "] to ", c10::demangle(typeid(T).name())); return *module; } /// Removes and returns the `Module` associated with the given `key`. /// Throws an exception if no such `key` is stored in the `ModuleDict`. /// Check contains(key) beforehand for a non-throwing way of access. std::shared_ptr<Module> pop(const std::string& key) { auto module = modules_[key]; modules_.erase(key); // Do not remove the registration of the module, to stay consistent with the // Python version. return module; } /// Update the `ModuleDict` with a vector of key-module pairs. 
void update( const std::vector<std::pair<std::string, std::shared_ptr<Module>>>& modules) { for (auto& item : modules) { insert(item.first, item.second); } } /// Update the `ModuleDict` with key-value pairs from an `OrderedDict` or /// `ModuleDict`. template <typename Container> void update(const Container& container) { for (auto& item : container) { insert(item.key(), item.value()); } } private: /// Private `OrderedDict` holding the key-Module pairs. torch::OrderedDict<std::string, std::shared_ptr<Module>> modules_; /// Insert a key-module pair, overwriting the value at an existing key, /// and register or replace the `Module`. void insert(const std::string& key, std::shared_ptr<Module> module) { if (contains(key)) { modules_[key] = std::move(module); replace_module(key, modules_[key]); } else { modules_.insert(key, std::move(module)); register_module(key, modules_.back().value()); } } }; /// A `ModuleHolder` subclass for `ModuleDictImpl`. /// See the documentation for `ModuleDictImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(ModuleDict); } // namespace torch::nn ```
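An illustrative sketch of the `ModuleDict` API above; the keys, layer sizes, and shapes are arbitrary choices for the example:

```cpp
#include <torch/torch.h>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

int main() {
  using namespace torch::nn;
  std::vector<std::pair<std::string, std::shared_ptr<Module>>> list = {
      {"linear", Linear(10, 3).ptr()},
      {"dropout", Dropout(0.5).ptr()},
  };
  ModuleDict dict(list);

  // Typed access; throws if the key is absent or the cast fails.
  auto& linear = dict->at<LinearImpl>("linear");
  std::cout << linear.weight.sizes() << '\n'; // [3, 10]

  // pop() removes the entry but, as noted above, keeps the submodule
  // registration, mirroring the Python version.
  auto dropped = dict->pop("dropout");
  std::cout << dict->size() << '\n'; // 1
}
```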
==================================================================================================================================================================== SOURCE CODE FILE: modulelist.h LINES: 1 SIZE: 9.00 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\modulelist.h ENCODING: utf-8 ```h #pragma once #include <c10/util/irange.h> #include <torch/nn/cloneable.h> #include <torch/nn/module.h> #include <utility> #include <vector> namespace torch::nn { /// A list of `Module`s that registers its elements. /// /// \rst /// .. code-block:: cpp /// /// torch::nn::ModuleList mlist( /// torch::nn::Linear(3, 4), /// torch::nn::BatchNorm1d(4), /// torch::nn::Dropout(0.5) /// ); /// /// for (const auto &module : *mlist) { /// module->pretty_print(std::cout); /// } /// /// \endrst /// /// Why should you use `ModuleList` instead of a simple `std::vector`? The value /// a `ModuleList` provides over manually calling a sequence of modules is that /// it allows treating the whole container *as a single module*, such that /// performing a transformation on the `ModuleList` applies to each of the /// modules it stores (which are each a registered submodule of the /// `ModuleList`). For example, calling /// `.to(torch::kCUDA)` on a `ModuleList` will move each module in the list to /// CUDA memory. For example: /// /// \rst /// .. code-block:: cpp /// /// torch::nn::ModuleList mlist( /// torch::nn::Linear(3, 4), /// torch::nn::BatchNorm1d(4), /// torch::nn::Dropout(0.5) /// ); /// /// // Convert all modules to CUDA. /// mlist->to(torch::kCUDA); /// /// \endrst /// /// Finally, `ModuleList` provides a lightweight container API, such as allowing /// iteration over submodules, positional access, adding a new module after /// construction via `push_back`, as well as joining two `ModuleList`s via /// `extend`. class ModuleListImpl : public Cloneable<ModuleListImpl> { public: using Iterator = std::vector<std::shared_ptr<Module>>::iterator; using ConstIterator = std::vector<std::shared_ptr<Module>>::const_iterator; ModuleListImpl() = default; /// Constructs the `ModuleList` from a variadic list of modules. template <typename... Modules> explicit ModuleListImpl(Modules&&... modules) { modules_.reserve(sizeof...(Modules)); push_back_var(std::forward<Modules>(modules)...); } /// Special cloning function for `ModuleList` because it does not use /// `reset()`. std::shared_ptr<Module> clone( const std::optional<Device>& device = std::nullopt) const override { auto clone = std::make_shared<ModuleListImpl>(); for (const auto& module : modules_) { clone->push_back(module->clone(device)); } return clone; } /// `reset()` is empty for `ModuleList`, since it does not have parameters of /// its own. void reset() override {} /// Pretty prints the `ModuleList` module into the given `stream`. void pretty_print(std::ostream& stream) const override { stream << "torch::nn::ModuleList"; } void push_back(std::shared_ptr<Module> module) { modules_.push_back(std::move(module)); const auto index = modules_.size() - 1; register_module(std::to_string(index), modules_[index]); } /// Adds a new `Module` to the `ModuleList` container, moving or copying /// it into a `shared_ptr` internally. This method allows passing value types, /// and letting the container deal with the boxing. 
template <typename M, typename = torch::detail::enable_if_module_t<M>> void push_back(M&& module) { using Type = std::remove_reference_t<M>; push_back(std::make_shared<Type>(std::forward<M>(module))); } /// Unwraps the contained module of a `ModuleHolder` and adds it to the /// `ModuleList`. template <typename M> void push_back(const ModuleHolder<M>& module_holder) { push_back(module_holder.ptr()); } /// Iterates over the container and calls `push_back()` on each value. template <typename Container> void extend(const Container& container) { for (const auto& module : container) { push_back(module); } } /// Returns an iterator to the start of the `ModuleList`. Iterator begin() { return modules_.begin(); } /// Returns a const iterator to the start of the `ModuleList`. ConstIterator begin() const { return modules_.begin(); } /// Returns an iterator to the end of the `ModuleList`. Iterator end() { return modules_.end(); } /// Returns a const iterator to the end of the `ModuleList`. ConstIterator end() const { return modules_.end(); } /// Attempts to return the module at the given index as the requested type. /// Throws an exception if the index is out of bounds or the types do not /// match. template <typename T> T& at(size_t index) { static_assert( torch::detail::is_module<T>::value, "Can only call ModuleList::at with an nn::Module type"); TORCH_CHECK(index < size(), "Index out of range"); auto module = modules_[index]->as<T>(); TORCH_CHECK( module, "Unable to cast module[", index, "] to ", c10::demangle(typeid(T).name())); return *module; } /// Attempts to return the module at the given index as the requested type. /// Throws an exception if the index is out of bounds or the types do not /// match. template <typename T> const T& at(size_t index) const { static_assert( torch::detail::is_module<T>::value, "Can only call ModuleList::at with an nn::Module type"); TORCH_CHECK(index < size(), "Index out of range"); const auto module = modules_[index]->as<T>(); TORCH_CHECK( module, "Unable to cast module[", index, "] to ", c10::demangle(typeid(T).name())); return *module; } /// Attempts to return a `std::shared_ptr` whose dynamic type is that of the /// underlying module at the given index. Throws an exception if the index is /// out of bounds. std::shared_ptr<Module> ptr(size_t index) const { TORCH_CHECK(index < size(), "Index out of range"); return modules_[index]; } /// Attempts to return a `std::shared_ptr` whose type is the one provided. /// Throws an exception if the index is out of bounds or the types do not /// match. template <typename T> std::shared_ptr<T> ptr(size_t index) const { static_assert( torch::detail::is_module<T>::value, "Can only call ModuleList::ptr with an nn::Module type"); TORCH_CHECK(index < size(), "Index out of range"); return std::dynamic_pointer_cast<T>(modules_[index]); } /// Like `ptr(index)`. std::shared_ptr<Module> operator[](size_t index) const { // This is the only method we can call without a type. return ptr(index); } /// The current size of the `ModuleList` container. size_t size() const noexcept { return modules_.size(); } /// True if there are no modules in the `ModuleList`. 
bool is_empty() const noexcept { return size() == 0; } void insert(size_t index, std::shared_ptr<Module> module) { TORCH_CHECK(index <= size(), "Index out of range"); if (index == size()) push_back(std::move(module)); else { modules_.insert( modules_.begin() + Iterator::difference_type(index), std::move(module)); for (const auto i : c10::irange(index, size() - 1)) { (void)i; // Suppress unused variable warning replace_module(std::to_string(index), modules_[index]); } register_module(std::to_string(size() - 1), modules_.back()); } } /// Unwraps the contained module of a `ModuleHolder` and inserts it in the /// `ModuleList`. template <typename M> void insert(size_t index, const ModuleHolder<M>& module_holder) { insert(index, module_holder.ptr()); } /// Inserts a new `Module` into the `ModuleList` container, moving or copying /// it into a `shared_ptr` internally. This method allows passing value types, /// and letting the container deal with the boxing. template <typename M, typename = torch::detail::enable_if_module_t<M>> void insert(size_t index, M&& module) { using Type = std::remove_reference_t<M>; insert(index, std::make_shared<Type>(std::forward<M>(module))); } private: template <typename Head, typename... Tail> void push_back_var(Head&& head, Tail&&... tail) { push_back(std::forward<Head>(head)); // Recursively calls this method, until the parameter pack only has this // entry left. Then calls `push_back()` a final time (above). push_back_var(std::forward<Tail>(tail)...); } /// The base case, when the list of modules is empty. void push_back_var() {} // Box the modules into `shared_ptr`s to give ModuleList reference semantics, // like the rest of the API. Note that this is not otherwise required; the // elements could just be stored by value. std::vector<std::shared_ptr<Module>> modules_; }; /// A `ModuleHolder` subclass for `ModuleListImpl`. /// See the documentation for `ModuleListImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(ModuleList); } // namespace torch::nn ```
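A short, illustrative sketch of the `ModuleList` API above (`push_back`, `insert`, typed `at`); sizes and shapes are arbitrary:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;
  ModuleList mlist(Linear(3, 4), Dropout(0.5));
  mlist->push_back(Linear(4, 2));            // registered under the name "2"
  mlist->insert(1, Functional(torch::relu)); // shifts the later indices

  // ModuleList has no forward() of its own; chain the submodules manually.
  torch::Tensor x = torch::randn({8, 3});
  x = mlist->at<LinearImpl>(0).forward(x);
  std::cout << x.sizes() << '\n';     // [8, 4]
  std::cout << mlist->size() << '\n'; // 4
}
```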
=================================================================================================================================================== SOURCE CODE FILE: named_any.h LINES: 1 SIZE: 2.46 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\named_any.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/modules/container/any.h> #include <torch/types.h> #include <memory> #include <type_traits> #include <utility> namespace torch::nn { /// Stores a type-erased `Module` with a name. /// /// The `NamedAnyModule` class enables the following API for constructing /// `nn::Sequential` with named submodules: /// \rst /// .. code-block:: cpp /// /// struct M : torch::nn::Module { /// explicit M(int value_) : value(value_) {} /// int value; /// int forward() { /// return value; /// } /// }; /// /// Sequential sequential({ /// {"m1", std::make_shared<M>(1)}, // shared pointer to `Module` is supported /// {std::string("m2"), M(2)}, // `Module` is supported /// {"linear1", Linear(10, 3)} // `ModuleHolder` is supported /// }); /// \endrst class NamedAnyModule { public: /// Creates a `NamedAnyModule` from a (boxed) `Module`. template <typename ModuleType> NamedAnyModule(std::string name, std::shared_ptr<ModuleType> module_ptr) : NamedAnyModule(std::move(name), AnyModule(std::move(module_ptr))) {} /// Creates a `NamedAnyModule` from a `Module`, moving or copying it /// into a `shared_ptr` internally. // NOTE: We need to use `std::remove_reference_t<M>` to get rid of // any reference components for make_shared. template <typename M, typename = torch::detail::enable_if_module_t<M>> NamedAnyModule(std::string name, M&& module) : NamedAnyModule( std::move(name), std::make_shared<std::remove_reference_t<M>>( std::forward<M>(module))) {} /// Creates a `NamedAnyModule` from a `Module` that is unwrapped from /// a `ModuleHolder`. template <typename M> NamedAnyModule(std::string name, const ModuleHolder<M>& module_holder) : NamedAnyModule(std::move(name), module_holder.ptr()) {} /// Creates a `NamedAnyModule` from a type-erased `AnyModule`. NamedAnyModule(std::string name, AnyModule any_module) : name_(std::move(name)), module_(std::move(any_module)) {} /// Returns a reference to the name. const std::string& name() const noexcept { return name_; } /// Returns a reference to the module. AnyModule& module() noexcept { return module_; } /// Returns a const reference to the module. const AnyModule& module() const noexcept { return module_; } private: std::string name_; AnyModule module_; }; } // namespace torch::nn ```
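An illustrative sketch of the named-submodule API that `NamedAnyModule` enables; names and shapes are arbitrary:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;
  // The braced-init-list below is converted to NamedAnyModules.
  Sequential seq({
      {"fc1", Linear(3, 4)},
      {"act", Functional(torch::relu)},
      {"fc2", Linear(4, 2)},
  });
  std::cout << seq->forward(torch::randn({5, 3})).sizes() << '\n'; // [5, 2]

  // The given names become the registered submodule names.
  for (const auto& p : seq->named_parameters()) {
    std::cout << p.key() << '\n'; // "fc1.weight", "fc1.bias", ...
  }
}
```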
======================================================================================================================================================================= SOURCE CODE FILE: parameterdict.h LINES: 3 SIZE: 4.50 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\parameterdict.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/cloneable.h> #include <torch/nn/pimpl.h> #include <torch/ordered_dict.h> #include <utility> #include <vector> namespace torch::nn { class ParameterDictImpl : public Cloneable<ParameterDictImpl> { public: using Iterator = OrderedDict<std::string, Tensor>::Iterator; using ConstIterator = OrderedDict<std::string, Tensor>::ConstIterator; ParameterDictImpl() = default; explicit ParameterDictImpl( const torch::OrderedDict<std::string, torch::Tensor>& params) { parameters_ = params; } /// `reset()` is empty for `ParameterDict`, since it does not have /// parameters of its own. void reset() override {} /// Pretty prints the `ParameterDict` module into the given `stream`. void pretty_print(std::ostream& stream) const override { stream << "torch::nn::ParameterDict(" << '\n'; for (const auto& pair : parameters_) { stream << "(" << pair.key() << ")" << ": Parameter containing: [" << pair.value().scalar_type() << " of size " << pair.value().sizes() << "]"; stream << '\n'; } stream << ")"; } /// Insert the parameter along with the key into the ParameterDict. /// The parameter's `requires_grad` state is taken from the tensor itself. Tensor& insert(const std::string& key, const Tensor& param) { bool requires_grad = param.requires_grad(); return register_parameter(key, param, requires_grad); } /// Remove key from the ParameterDict and return its value; throws an /// exception if the key is not contained. Check contains(key) beforehand for /// a non-throwing access. Tensor pop(const std::string& key) { torch::Tensor v = parameters_[key]; parameters_.erase(key); return v; } /// Return the keys in the dict ::std::vector<std::string> keys() const { return parameters_.keys(); } /// Return the values in the dict ::std::vector<torch::Tensor> values() const { return parameters_.values(); } /// Return an iterator to the start of ParameterDict Iterator begin() { return parameters_.begin(); } /// Return a const iterator to the start of ParameterDict ConstIterator begin() const { return parameters_.begin(); } /// Return an iterator to the end of ParameterDict Iterator end() { return parameters_.end(); } /// Return a const iterator to the end of ParameterDict ConstIterator end() const { return parameters_.end(); } /// Return the number of items currently stored in the ParameterDict size_t size() const noexcept { return parameters_.size(); } /// Return true if the ParameterDict is empty, otherwise return false bool empty() const noexcept { return parameters_.is_empty(); } /// Update the ParameterDict with the key-value pairs from /// another ParameterDict, overwriting existing keys template <typename Container> void update(const Container& container) { for (auto& item : container) { parameters_[item.key()] = item.value(); } } /// Remove all parameters in the ParameterDict void clear() { parameters_.clear(); } /// Check whether a parameter with the given key exists in the ParameterDict bool contains(const std::string& key) const noexcept { return parameters_.contains(key); } /// Returns the value associated with the given `key`. Throws an exception if /// no such key is stored in the `ParameterDict`. 
Check contains(key) beforehand /// for a non-throwing way of access. const Tensor& get(const std::string& key) const { return parameters_[key]; } /// Returns the value associated with the given `key`. Throws an exception if /// no such key is stored in the `ParameterDict`. Check contains(key) beforehand /// for a non-throwing way of access. Tensor& get(const std::string& key) { return parameters_[key]; } /// Returns the value associated with the given `key`. Throws an exception if /// no such key is stored in the `ParameterDict`. Check contains(key) beforehand /// for a non-throwing way of access. Tensor& operator[](const std::string& key) { return parameters_[key]; } /// Returns the value associated with the given `key`. Throws an exception if /// no such key is stored in the `ParameterDict`. Check contains(key) beforehand /// for a non-throwing way of access. const Tensor& operator[](const std::string& key) const { return parameters_[key]; } }; TORCH_MODULE(ParameterDict); } // namespace torch::nn ```
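A minimal sketch of the `ParameterDict` API above; the keys and tensor shapes are arbitrary:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;
  ParameterDict pd;
  // insert() takes the requires_grad state from the tensor itself.
  pd->insert("weight", torch::randn({3, 4}, torch::requires_grad()));
  pd->insert("scale", torch::ones(1));

  if (pd->contains("weight")) {
    std::cout << pd->get("weight").sizes() << '\n'; // [3, 4]
  }
  torch::Tensor removed = pd->pop("scale");
  std::cout << pd->size() << '\n'; // 1
}
```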
======================================================================================================================================================================= SOURCE CODE FILE: parameterlist.h LINES: 3 SIZE: 5.61 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\parameterlist.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/cloneable.h> #include <torch/nn/module.h> #include <vector> namespace torch::nn { class ParameterListImpl : public Cloneable<ParameterListImpl> { public: using Iterator = typename std::vector< OrderedDict<std::string, torch::Tensor>::Item>::iterator; using ConstIterator = typename std::vector< OrderedDict<std::string, torch::Tensor>::Item>::const_iterator; ParameterListImpl() = default; /// Constructs the `ParameterList` from a variadic list of parameters. template <typename... Tensors> explicit ParameterListImpl(Tensors&&... params) { parameters_.reserve(sizeof...(Tensors)); push_back_var(std::forward<Tensors>(params)...); } template <typename... Tensors> explicit ParameterListImpl(const Tensors&... params) { parameters_.reserve(sizeof...(Tensors)); push_back_var(std::forward<Tensors>(params)...); } /// `reset()` is empty for `ParameterList`, since it does not have parameters /// of its own. void reset() override {} /// Pretty prints the `ParameterList` module into the given `stream`. void pretty_print(std::ostream& stream) const override { stream << "torch::nn::ParameterList(" << '\n'; for (const auto& pair : parameters_) { stream << "(" << pair.key() << ")" << ": Parameter containing: [" << pair.value().scalar_type() << " of size " << pair.value().sizes() << "]"; stream << '\n'; } stream << ")"; } /// Pushes a given parameter to the end of the list void append(torch::Tensor&& param) { bool requires_grad = param.requires_grad(); register_parameter( std::to_string(parameters_.size()), std::move(param), requires_grad); } /// Pushes a given parameter to the end of the list void append(const torch::Tensor& param) { bool requires_grad = param.requires_grad(); register_parameter( std::to_string(parameters_.size()), param, requires_grad); } /// Pushes a given key-value pair to the end of the list. /// The key of the pair is discarded; only the value /// is added into the `ParameterList` void append(const OrderedDict<std::string, torch::Tensor>::Item& pair) { register_parameter( std::to_string(parameters_.size()), pair.value(), pair.value().requires_grad()); } /// extend parameters from a container to the end of the list template <typename Container> void extend(const Container& container) { for (const auto& param : container) { append(param); } } /// Returns an iterator to the start of the ParameterList /// the iterator returned will be of type `OrderedDict<std::string, /// torch::Tensor>::Item` Iterator begin() { return parameters_.begin(); } /// Returns a const iterator to the start of the ParameterList /// the iterator returned will be of type `OrderedDict<std::string, /// torch::Tensor>::Item` ConstIterator begin() const { return parameters_.begin(); } /// Returns an iterator to the end of the ParameterList /// the iterator returned will be of type `OrderedDict<std::string, /// torch::Tensor>::Item` Iterator end() { return parameters_.end(); } /// Returns a const iterator to the end of the ParameterList /// the iterator returned will be of type `OrderedDict<std::string, /// torch::Tensor>::Item` ConstIterator end() const { return parameters_.end(); } 
/// Returns the parameter at the given `idx`. Throws an exception if /// the index is out of range. at::Tensor& at(size_t idx) { TORCH_CHECK(idx < size(), "Index out of range"); return parameters_[std::to_string(idx)]; } /// Returns the parameter at the given `idx`. Throws an exception if /// the index is out of range. const at::Tensor& at(size_t idx) const { TORCH_CHECK(idx < size(), "Index out of range"); return parameters_[std::to_string(idx)]; } /// Returns the parameter at the given `idx`. Throws an exception if /// the index is out of range. at::Tensor& operator[](size_t idx) { return at(idx); } /// Returns the parameter at the given `idx`. Throws an exception if /// the index is out of range. const at::Tensor& operator[](size_t idx) const { return at(idx); } /// Return the size of the ParameterList size_t size() const noexcept { return parameters_.size(); } /// True if the ParameterList is empty bool is_empty() const noexcept { return parameters_.is_empty(); } /// Overloads `+=` so that two `ParameterList`s can be incrementally added template <typename Container> Container& operator+=(const Container& other) { extend(other); return *this; } private: template <typename Head, typename... Tail> void push_back_var(Head&& head, Tail&&... tail) { append(std::forward<Head>(head)); // Recursively calls this method, until the parameter pack only has this // entry left. Then calls `append()` a final time (above). push_back_var(std::forward<Tail>(tail)...); } /// The base case, when the list of parameters is empty. void push_back_var() {} }; TORCH_MODULE(ParameterList); } // namespace torch::nn ```
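A minimal sketch of the `ParameterList` API above; the tensor shapes are arbitrary:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;
  ParameterList plist(torch::randn({2, 2}, torch::requires_grad()),
                      torch::zeros(3));
  plist->append(torch::ones(4));

  // Positional access; at() throws on an out-of-range index.
  std::cout << plist->at(0).sizes() << '\n'; // [2, 2]
  std::cout << plist->size() << '\n';        // 3
}
```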
==================================================================================================================================================================== SOURCE CODE FILE: sequential.h LINES: 1 SIZE: 13.79 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\container\sequential.h ENCODING: utf-8 ```h #pragma once #include <torch/detail/static.h> #include <torch/nn/cloneable.h> #include <torch/nn/module.h> #include <torch/nn/modules/container/any.h> #include <torch/nn/modules/container/named_any.h> #include <torch/nn/pimpl.h> #include <torch/types.h> #include <c10/util/Exception.h> #include <cstdint> #include <memory> #include <ostream> #include <string> #include <type_traits> #include <utility> #include <vector> namespace torch::nn { /// A list of `Module`s that acts as a `Module` itself. /// /// A `Sequential` is fundamentally a list of `Module`s, each with a `forward()` /// method. `Sequential` provides a `forward()` method of its own, which accepts /// any input and forwards it to the first module it stores. It then "chains" /// outputs to inputs sequentially for each subsequent module, finally returning /// the output of the last module. For example: /// /// \rst /// .. code-block:: cpp /// /// torch::nn::Sequential seq( /// torch::nn::Linear(3, 4), /// torch::nn::BatchNorm1d(4), /// torch::nn::Dropout(0.5) /// ); /// /// auto output = seq->forward(torch::ones(3)); /// /// \endrst /// /// This can conceptually be thought of as the following loop (using Python as /// pseudocode): /// /// \rst /// .. code-block:: python /// /// def forward(sequential, input): /// for module in sequential: /// input = module(input) /// return input /// /// \endrst /// /// Why should you use `Sequential` instead of a simple `std::vector`? The value /// a `Sequential` provides over manually calling a sequence of modules is that /// it allows treating the whole container *as a single module*, such that /// performing a transformation on the `Sequential` applies to each of the /// modules it stores (which are each a registered submodule of the /// `Sequential`). For example, calling /// `.to(torch::kCUDA)` on a `Sequential` will move each module in the list to /// CUDA memory. For example: /// /// \rst /// .. code-block:: cpp /// /// torch::nn::Sequential seq( /// torch::nn::Linear(3, 4), /// torch::nn::BatchNorm1d(4), /// torch::nn::Dropout(0.5) /// ); /// /// // Convert all modules to CUDA. /// seq->to(torch::kCUDA); /// /// \endrst /// /// Finally, `Sequential` provides a lightweight container API, such as allowing /// iteration over submodules, positional access, adding a new module after /// construction via `push_back`, as well as joining two `Sequential`s via /// `extend`. /// /// \rst /// .. attention:: /// One current limitation of `Sequential` is that all except the first module /// must accept a single argument. If your modules need to take multiple /// arguments, you should define them to take and return tuples. /// \endrst class SequentialImpl : public Cloneable<SequentialImpl> { public: using Iterator = std::vector<AnyModule>::iterator; using ConstIterator = std::vector<AnyModule>::const_iterator; SequentialImpl() = default; /// Constructs the `Sequential` from a variadic list of modules. template <typename... Modules> explicit SequentialImpl(Modules&&... modules) { modules_.reserve(sizeof...(Modules)); push_back(std::forward<Modules>(modules)...); } /// Constructs the `Sequential` from an `OrderedDict` of named `AnyModule`s. 
explicit SequentialImpl( torch::OrderedDict<std::string, AnyModule>&& ordered_dict) { modules_.reserve(ordered_dict.size()); for (auto& item : ordered_dict) { push_back(item.key(), std::move(item.value())); } } /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s. /// It enables the following use case: /// `Sequential sequential({{"m1", M(1)}, {"m2", M(2)}})` explicit SequentialImpl(std::initializer_list<NamedAnyModule> named_modules) { modules_.reserve(named_modules.size()); for (const auto& named_module : named_modules) { push_back(named_module.name(), named_module.module()); } } /// Special cloning function for `Sequential` because it does not use /// `reset()`. std::shared_ptr<Module> clone( const std::optional<Device>& device = std::nullopt) const override { auto clone = std::make_shared<SequentialImpl>(); for (const auto& module : modules_) { clone->push_back(module.clone(device)); } return clone; } /// `reset()` is empty for `Sequential`, since it does not have parameters of /// its own. void reset() override {} /// Pretty prints the `Sequential` module into the given `stream`. void pretty_print(std::ostream& stream) const override { stream << "torch::nn::Sequential"; } /// Feeds `inputs` to the first module and then chains outputs to inputs, /// returning the last output. /// /// Conceptually the following loop in Python: /// /// \rst /// .. code-block:: python /// /// def forward(sequential, input): /// for module in sequential: /// input = module(input) /// return input /// /// \endrst /// /// The return type is taken as the first template parameter. It defaults to /// `Tensor`. If the last module in the `Sequential` returns another type `T`, /// you should call `forward<T>(inputs)` instead of just `forward(inputs)`: /// /// \rst /// .. code-block:: cpp /// /// torch::Tensor tensor = sequential1->forward(inputs); /// int integer = sequential2->forward<int>(inputs); /// float value = sequential3->forward<float>(inputs); /// /// \endrst template <typename ReturnType = Tensor, typename... InputTypes> ReturnType forward(InputTypes&&... inputs) { TORCH_CHECK(!is_empty(), "Cannot call forward() on an empty Sequential"); auto iterator = modules_.begin(); auto input = iterator->any_forward(std::forward<InputTypes>(inputs)...); for (++iterator; iterator != modules_.end(); ++iterator) { input = iterator->any_forward(std::move(input)); } // Check the return value and give a nice error message if the requested // return type was incorrect. if (auto* return_value = input.template try_get<ReturnType>()) { return std::move(*return_value); } TORCH_CHECK( false, "The type of the return value is ", c10::demangle(input.type_info().name()), ", but you asked for type ", c10::demangle(typeid(ReturnType).name())); } /// Adds a new (boxed) `Module` to the `Sequential` container. template <typename ModuleType> void push_back(std::shared_ptr<ModuleType> module_ptr) { push_back(std::to_string(modules_.size()), std::move(module_ptr)); } /// Adds a new named (boxed) `Module` to the `Sequential` container. template <typename ModuleType> void push_back(std::string name, std::shared_ptr<ModuleType> module_ptr) { push_back(std::move(name), AnyModule(std::move(module_ptr))); } /// Adds a new `Module` to the `Sequential` container, moving or copying it /// into a `shared_ptr` internally. This method allows passing value types, /// and letting the container deal with the boxing. 
This means you can write /// `Sequential(Module(3, 4))` instead of /// `Sequential(std::make_shared<Module>(3, 4))`. template <typename M, typename = torch::detail::enable_if_module_t<M>> void push_back(M&& module) { push_back(std::to_string(modules_.size()), std::forward<M>(module)); } /// Adds a new named `Module` to the `Sequential` container, moving or copying /// it into a `shared_ptr` internally. This method allows passing value types, /// and letting the container deal with the boxing. template <typename M, typename = torch::detail::enable_if_module_t<M>> void push_back(std::string name, M&& module) { using Type = typename std::remove_reference_t<M>; push_back(std::move(name), std::make_shared<Type>(std::forward<M>(module))); } /// Unwraps the contained module of a `ModuleHolder` and adds it to the /// `Sequential`. template <typename M> void push_back(const ModuleHolder<M>& module_holder) { push_back(std::to_string(modules_.size()), module_holder); } /// Unwraps the contained named module of a `ModuleHolder` and adds it to the /// `Sequential`. template <typename M> void push_back(std::string name, const ModuleHolder<M>& module_holder) { push_back(std::move(name), module_holder.ptr()); } /// Iterates over the container and calls `push_back()` on each value. template <typename Container> void extend(const Container& container) { for (const auto& module : container) { push_back(module); } } /// Adds a type-erased `AnyModule` to the `Sequential`. void push_back(AnyModule any_module) { push_back(std::to_string(modules_.size()), std::move(any_module)); } void push_back(std::string name, AnyModule any_module) { modules_.push_back(std::move(any_module)); const auto index = modules_.size() - 1; register_module(std::move(name), modules_[index].ptr()); } /// Returns an iterator to the start of the `Sequential`. Iterator begin() { return modules_.begin(); } /// Returns a const iterator to the start of the `Sequential`. ConstIterator begin() const { return modules_.begin(); } /// Returns an iterator to the end of the `Sequential`. Iterator end() { return modules_.end(); } /// Returns a const iterator to the end of the `Sequential`. ConstIterator end() const { return modules_.end(); } /// Attempts to return the module at the given index as the requested type. /// Throws an exception if the index is out of bounds or the types do not /// match. template <typename T> T& at(size_t index) { static_assert( torch::detail::is_module<T>::value, "Can only call Sequential::at with an nn::Module type"); TORCH_CHECK(index < size(), "Index out of range"); return modules_[index].get<T>(); } /// Attempts to return the module at the given index as the requested type. /// Throws an exception if the index is out of bounds or the types do not /// match. template <typename T> const T& at(size_t index) const { static_assert( torch::detail::is_module<T>::value, "Can only call Sequential::at with an nn::Module type"); TORCH_CHECK(index < size(), "Index out of range"); return modules_[index].get<T>(); } /// Attempts to return a `std::shared_ptr` whose dynamic type is that of the /// underlying module at the given index. Throws an exception if the index is /// out of bounds. std::shared_ptr<Module> ptr(size_t index) const { TORCH_CHECK(index < size(), "Index out of range"); return modules_[index].ptr(); } /// Attempts to return a `std::shared_ptr` whose type is the one provided. /// Throws an exception if the index is out of bounds or the types do not /// match. 
template <typename T> std::shared_ptr<T> ptr(size_t index) const { static_assert( torch::detail::is_module<T>::value, "Can only call Sequential::ptr with an nn::Module type"); TORCH_CHECK(index < size(), "Index out of range"); return modules_[index].ptr<T>(); } /// Like `ptr(index)`. std::shared_ptr<Module> operator[](size_t index) const { // This is the only method we can call without a type. return ptr(index); } /// The current size of the `Sequential` container. size_t size() const noexcept { return modules_.size(); } /// True if there are no modules in the `Sequential`. bool is_empty() const noexcept { return size() == 0; } private: /// Takes a First *and* Second parameter, to avoid ambiguity when a parameter /// pack has only one type, in which case the template would be preferred, /// even if the other `push_back` functions are better fits (e.g. `unique_ptr` /// -> `shared_ptr` overload). /// NOTE: We explicitly avoid matching this template with /// `push_back(std::string("name"), module)` or `push_back("name", module)`, /// since they should be handled by their respective `push_back` functions. template < typename First, typename Second, typename... Rest, typename = std::enable_if_t< !std::is_same_v<First, std::string> && // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays) !std::is_same_v<std::decay_t<First>, std::decay_t<const char (&)[]>>>> void push_back(First&& first, Second&& second, Rest&&... rest) { push_back(std::forward<First>(first)); // Recursively calls this method, until the parameter pack only has this // entry left. Then calls `push_back()` a final time (above). push_back(std::forward<Second>(second), std::forward<Rest>(rest)...); } /// The base case, when the list of modules is empty. void push_back() {} // Box the AnyModules to give Sequential reference semantics, like the rest of // the API. Note that this is not required otherwise; this could just be a // `vector<AnyModule>`. std::vector<AnyModule> modules_; }; /// A `ModuleHolder` subclass for `SequentialImpl`. /// See the documentation for `SequentialImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. class Sequential : public torch::nn::ModuleHolder<SequentialImpl> { public: using torch::nn::ModuleHolder<SequentialImpl>::ModuleHolder; Sequential() = default; /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s. /// It enables the following use case: /// `Sequential sequential({{"m1", M(1)}, {"m2", M(2)}})` Sequential(std::initializer_list<NamedAnyModule> named_modules) : ModuleHolder(std::make_shared<SequentialImpl>(named_modules)) {} }; } // namespace torch::nn ```
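A minimal sketch of the `Sequential` API above, mirroring its doc comments; sizes and shapes are illustrative:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;
  Sequential seq(Linear(3, 4), Functional(torch::relu));
  seq->push_back(Linear(4, 1)); // registered under the name "2"

  // forward() defaults to returning a Tensor; use forward<T>() when the
  // last module returns some other type.
  torch::Tensor out = seq->forward(torch::randn({2, 3}));
  std::cout << out.sizes() << '\n'; // [2, 1]
}
```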
==================================================================================================================================================== SOURCE CODE FILE: conv.h LINES: 1 SIZE: 16.32 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\conv.h ENCODING: utf-8 ```h #pragma once #include <c10/util/irange.h> #include <c10/util/overloaded.h> #include <torch/expanding_array.h> #include <torch/nn/cloneable.h> #include <torch/nn/init.h> #include <torch/nn/modules/common.h> #include <torch/nn/modules/utils.h> #include <torch/nn/options/conv.h> #include <torch/nn/pimpl.h> #include <torch/types.h> #include <torch/csrc/Export.h> #include <cstddef> #include <vector> namespace torch::nn { /// Base class for all (dimension-specialized) convolution modules. template <size_t D, typename Derived> class ConvNdImpl : public torch::nn::Cloneable<Derived> { public: explicit ConvNdImpl(detail::ConvNdOptions<D> options_) : options(std::move(options_)) { ConvNdImpl::reset(); } void reset() override { TORCH_CHECK( options.in_channels() > 0 && options.groups() > 0 && options.out_channels() > 0, "in_channels, groups and out_channels must be a positive integer."); TORCH_CHECK( options.in_channels() % options.groups() == 0, "in_channels must be divisible by groups"); TORCH_CHECK( options.out_channels() % options.groups() == 0, "out_channels must be divisible by groups"); std::visit( c10::overloaded( [&](enumtype::kValid) { _reversed_padding_repeated_twice.resize(2 * D); std::fill_n(_reversed_padding_repeated_twice.begin(), 2 * D, 0); }, [&](enumtype::kSame) { for (const auto i : c10::irange(D)) { const auto stride = (*options.stride())[i]; TORCH_CHECK( stride == 1, "padding='same' is not supported for strided convolutions"); } _reversed_padding_repeated_twice.resize(2 * D); for (const auto i : c10::irange(D)) { const auto dilation = (*options.dilation())[i]; const auto kernel_size = (*options.kernel_size())[i]; const auto total_padding = dilation * (kernel_size - 1); auto left_pad = total_padding / 2; auto right_pad = total_padding - left_pad; _reversed_padding_repeated_twice[2 * i] = left_pad; _reversed_padding_repeated_twice[2 * i + 1] = right_pad; } }, [&](const ExpandingArray<D>& pad) { _reversed_padding_repeated_twice = torch::nn::modules::utils::_reverse_repeat_vector(pad, 2); }), options.padding()); if (options.transposed()) { std::vector<int64_t> weight_sizes = { options.in_channels(), options.out_channels() / options.groups()}; weight_sizes.insert( weight_sizes.end(), (*options.kernel_size()).begin(), (*options.kernel_size()).end()); weight = this->register_parameter("weight", torch::empty(weight_sizes)); } else { std::vector<int64_t> weight_sizes = { options.out_channels(), options.in_channels() / options.groups()}; weight_sizes.insert( weight_sizes.end(), (*options.kernel_size()).begin(), (*options.kernel_size()).end()); weight = this->register_parameter("weight", torch::empty(weight_sizes)); } if (options.bias()) { bias = this->register_parameter( "bias", torch::empty({options.out_channels()})); } else { this->register_parameter("bias", Tensor(), /*requires_grad=*/false); } reset_parameters(); } void reset_parameters() { init::kaiming_uniform_( weight, /*a=*/std::sqrt(5)); // NOLINT(cppcoreguidelines-avoid-magic-numbers) if (bias.defined()) { auto [fan_in, fan_out] = init::_calculate_fan_in_and_fan_out(weight); auto bound = 1 / std::sqrt(fan_in); init::uniform_(bias, -bound, bound); } } /// Pretty prints the `Conv{1,2,3}d` module into the given 
`stream`. void pretty_print(std::ostream& stream) const override { stream << "torch::nn::Conv" << D << "d" << "(" << options.in_channels() << ", " << options.out_channels() << ", kernel_size=" << options.kernel_size() << ", stride=" << options.stride(); std::visit( c10::overloaded( [&](enumtype::kValid) { stream << ", padding='valid'"; }, [&](enumtype::kSame) { stream << ", padding='same'"; }, [&](const ExpandingArray<D>& pad) { if (*pad != *ExpandingArray<D>(0)) { stream << ", padding=" << pad; } }), options.padding()); if (*options.dilation() != *ExpandingArray<D>(1)) { stream << ", dilation=" << options.dilation(); } if (*options.output_padding() != *ExpandingArray<D>(0)) { stream << ", output_padding=" << options.output_padding(); } if (options.groups() != 1) { stream << ", groups=" << options.groups(); } if (!options.bias()) { stream << ", bias=" << std::boolalpha << false; } if (!std::get_if<enumtype::kZeros>(&options.padding_mode())) { stream << ", padding_mode=" << enumtype::get_enum_name(options.padding_mode()); } stream << ")"; } /// The options with which this `Module` was constructed. // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) detail::ConvNdOptions<D> options; /// The learned kernel (or "weight"). // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) Tensor weight; /// The learned bias. Only defined if the `bias` option was true. // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) Tensor bias; protected: // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) std::vector<int64_t> _reversed_padding_repeated_twice; }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies convolution over a 1-D input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Conv1d to learn about /// the exact behavior of this module. /// /// See the documentation for `torch::nn::Conv1dOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false)); /// ``` class TORCH_API Conv1dImpl : public ConvNdImpl<1, Conv1dImpl> { public: Conv1dImpl( int64_t input_channels, int64_t output_channels, ExpandingArray<1> kernel_size) : Conv1dImpl( Conv1dOptions(input_channels, output_channels, kernel_size)) {} explicit Conv1dImpl(Conv1dOptions options_); Tensor forward(const Tensor& input); }; /// A `ModuleHolder` subclass for `Conv1dImpl`. /// See the documentation for `Conv1dImpl` class to learn what methods it /// provides, and examples of how to use `Conv1d` with /// `torch::nn::Conv1dOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Conv1d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies convolution over a 2-D input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Conv2d to learn about /// the exact behavior of this module. /// /// See the documentation for `torch::nn::Conv2dOptions` class to learn what /// constructor arguments are supported for this module. 
/// /// Example: /// ``` /// Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false)); /// ``` class TORCH_API Conv2dImpl : public ConvNdImpl<2, Conv2dImpl> { public: Conv2dImpl( int64_t input_channels, int64_t output_channels, ExpandingArray<2> kernel_size) : Conv2dImpl( Conv2dOptions(input_channels, output_channels, kernel_size)) {} explicit Conv2dImpl(Conv2dOptions options_); Tensor forward(const Tensor& input); protected: Tensor _conv_forward(const Tensor& input, const Tensor& weight); }; /// A `ModuleHolder` subclass for `Conv2dImpl`. /// See the documentation for `Conv2dImpl` class to learn what methods it /// provides, and examples of how to use `Conv2d` with /// `torch::nn::Conv2dOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Conv2d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies convolution over a 3-D input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Conv3d to learn about /// the exact behavior of this module. /// /// See the documentation for `torch::nn::Conv3dOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false)); /// ``` class TORCH_API Conv3dImpl : public ConvNdImpl<3, Conv3dImpl> { public: Conv3dImpl( int64_t input_channels, int64_t output_channels, ExpandingArray<3> kernel_size) : Conv3dImpl( Conv3dOptions(input_channels, output_channels, kernel_size)) {} explicit Conv3dImpl(Conv3dOptions options_); Tensor forward(const Tensor& input); }; /// A `ModuleHolder` subclass for `Conv3dImpl`. /// See the documentation for `Conv3dImpl` class to learn what methods it /// provides, and examples of how to use `Conv3d` with /// `torch::nn::Conv3dOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Conv3d); // ~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Base class for all (dimension-specialized) convolution transpose modules. template <size_t D, typename Derived> class ConvTransposeNdImpl : public ConvNdImpl<D, Derived> { public: using torch::nn::ConvNdImpl<D, Derived>::ConvNdImpl; explicit ConvTransposeNdImpl(detail::ConvNdOptions<D> options_) : ConvNdImpl<D, Derived>(options_) { TORCH_INTERNAL_ASSERT( std::holds_alternative<ExpandingArray<D>>(this->options.padding()), "ConvTranspose padding cannot be a string"); } /// Pretty prints the `ConvTranspose{1,2,3}d` module into the given `stream`. 
void pretty_print(std::ostream& stream) const override { stream << "torch::nn::ConvTranspose" << D << "d" << "(" << this->options.in_channels() << ", " << this->options.out_channels() << ", kernel_size=" << this->options.kernel_size() << ", stride=" << this->options.stride(); const auto& pad = padding(); if (*pad != *ExpandingArray<D>(0)) { stream << ", padding=" << pad; } if (*this->options.dilation() != *ExpandingArray<D>(1)) { stream << ", dilation=" << this->options.dilation(); } if (*this->options.output_padding() != *ExpandingArray<D>(0)) { stream << ", output_padding=" << this->options.output_padding(); } if (this->options.groups() != 1) { stream << ", groups=" << this->options.groups(); } if (!this->options.bias()) { stream << ", bias=" << std::boolalpha << false; } if (!std::get_if<enumtype::kZeros>(&this->options.padding_mode())) { stream << ", padding_mode=" << enumtype::get_enum_name(this->options.padding_mode()); } stream << ")"; } protected: const ExpandingArray<D>& padding() const { return std::get<ExpandingArray<D>>(this->options.padding()); } std::vector<int64_t> _output_padding( const Tensor& input, const std::optional<at::IntArrayRef>& output_size, const ExpandingArray<D>& stride, const ExpandingArray<D>& padding, const ExpandingArray<D>& kernel_size); }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose1d // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the ConvTranspose1d function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.ConvTranspose1d to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::ConvTranspose1dOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// ConvTranspose1d model(ConvTranspose1dOptions(3, 2, /// 3).stride(1).bias(false)); /// ``` class TORCH_API ConvTranspose1dImpl : public ConvTransposeNdImpl<1, ConvTranspose1dImpl> { public: ConvTranspose1dImpl( int64_t input_channels, int64_t output_channels, ExpandingArray<1> kernel_size) : ConvTranspose1dImpl(ConvTranspose1dOptions( input_channels, output_channels, kernel_size)) {} explicit ConvTranspose1dImpl(ConvTranspose1dOptions options_); Tensor forward( const Tensor& input, const std::optional<at::IntArrayRef>& output_size = std::nullopt); protected: FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional<at::IntArrayRef>())}) }; /// A `ModuleHolder` subclass for `ConvTranspose1dImpl`. /// See the documentation for `ConvTranspose1dImpl` class to learn what methods /// it provides, and examples of how to use `ConvTranspose1d` with /// `torch::nn::ConvTranspose1dOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(ConvTranspose1d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose2d // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the ConvTranspose2d function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.ConvTranspose2d to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::ConvTranspose2dOptions` class to learn /// what constructor arguments are supported for this module. 
/// /// Example: /// ``` /// ConvTranspose2d model(ConvTranspose2dOptions(3, 2, /// 3).stride(1).bias(false)); /// ``` class TORCH_API ConvTranspose2dImpl : public ConvTransposeNdImpl<2, ConvTranspose2dImpl> { public: ConvTranspose2dImpl( int64_t input_channels, int64_t output_channels, ExpandingArray<2> kernel_size) : ConvTranspose2dImpl(ConvTranspose2dOptions( input_channels, output_channels, kernel_size)) {} explicit ConvTranspose2dImpl(ConvTranspose2dOptions options_); Tensor forward( const Tensor& input, const std::optional<at::IntArrayRef>& output_size = std::nullopt); protected: FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional<at::IntArrayRef>())}) }; /// A `ModuleHolder` subclass for `ConvTranspose2dImpl`. /// See the documentation for `ConvTranspose2dImpl` class to learn what methods /// it provides, and examples of how to use `ConvTranspose2d` with /// `torch::nn::ConvTranspose2dOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(ConvTranspose2d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose3d // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the ConvTranspose3d function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.ConvTranspose3d to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::ConvTranspose3dOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// ConvTranspose3d model(ConvTranspose3dOptions(2, 2, /// 2).stride(1).bias(false)); /// ``` class TORCH_API ConvTranspose3dImpl : public ConvTransposeNdImpl<3, ConvTranspose3dImpl> { public: ConvTranspose3dImpl( int64_t input_channels, int64_t output_channels, ExpandingArray<3> kernel_size) : ConvTranspose3dImpl(ConvTranspose3dOptions( input_channels, output_channels, kernel_size)) {} explicit ConvTranspose3dImpl(ConvTranspose3dOptions options_); Tensor forward( const Tensor& input, const std::optional<at::IntArrayRef>& output_size = std::nullopt); protected: FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional<at::IntArrayRef>())}) }; /// A `ModuleHolder` subclass for `ConvTranspose3dImpl`. /// See the documentation for `ConvTranspose3dImpl` class to learn what methods /// it provides, and examples of how to use `ConvTranspose3d` with /// `torch::nn::ConvTranspose3dOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(ConvTranspose3d); } // namespace torch::nn ```
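An illustrative round trip through `Conv2d` and `ConvTranspose2d` based on the declarations above; the channel counts, strides, and shapes are arbitrary, and the expected sizes in the comments follow the usual convolution arithmetic:

```cpp
#include <torch/torch.h>
#include <iostream>
#include <vector>

int main() {
  using namespace torch::nn;
  Conv2d conv(Conv2dOptions(3, 8, /*kernel_size=*/3).stride(2).padding(1));
  ConvTranspose2d deconv(
      ConvTranspose2dOptions(8, 3, /*kernel_size=*/3).stride(2).padding(1));

  torch::Tensor x = torch::randn({1, 3, 16, 16});
  torch::Tensor y = conv(x); // [1, 8, 8, 8]

  // With stride > 1 the transposed output shape is ambiguous; output_size
  // selects the desired one (here, back to 16x16).
  std::vector<int64_t> out_size{16, 16};
  torch::Tensor z = deconv->forward(y, out_size);
  std::cout << y.sizes() << " " << z.sizes() << '\n';
}
```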
======================================================================================================================================================== SOURCE CODE FILE: distance.h LINES: 1 SIZE: 3.07 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\distance.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/cloneable.h> #include <torch/nn/functional/distance.h> #include <torch/nn/options/distance.h> #include <torch/nn/pimpl.h> #include <torch/types.h> #include <torch/csrc/Export.h> namespace torch::nn { /// Returns the cosine similarity between :math:`x_1` and :math:`x_2`, computed /// along `dim`. /// See https://pytorch.org/docs/main/nn.html#torch.nn.CosineSimilarity to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::CosineSimilarityOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// CosineSimilarity model(CosineSimilarityOptions().dim(0).eps(0.5)); /// ``` class TORCH_API CosineSimilarityImpl : public Cloneable<CosineSimilarityImpl> { public: explicit CosineSimilarityImpl(const CosineSimilarityOptions& options_ = {}); void reset() override; /// Pretty prints the `CosineSimilarity` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input1, const Tensor& input2); /// The options with which this `Module` was constructed. CosineSimilarityOptions options; }; /// A `ModuleHolder` subclass for `CosineSimilarityImpl`. /// See the documentation for `CosineSimilarityImpl` class to learn what methods /// it provides, and examples of how to use `CosineSimilarity` with /// `torch::nn::CosineSimilarityOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(CosineSimilarity); // ============================================================================ /// Returns the batchwise pairwise distance between vectors :math:`v_1`, /// :math:`v_2` using the p-norm. /// See https://pytorch.org/docs/main/nn.html#torch.nn.PairwiseDistance to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::PairwiseDistanceOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// PairwiseDistance /// model(PairwiseDistanceOptions().p(3).eps(0.5).keepdim(true)); /// ``` class TORCH_API PairwiseDistanceImpl : public Cloneable<PairwiseDistanceImpl> { public: explicit PairwiseDistanceImpl(const PairwiseDistanceOptions& options_ = {}); void reset() override; /// Pretty prints the `PairwiseDistance` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input1, const Tensor& input2); /// The options with which this `Module` was constructed. PairwiseDistanceOptions options; }; /// A `ModuleHolder` subclass for `PairwiseDistanceImpl`. /// See the documentation for `PairwiseDistanceImpl` class to learn what methods /// it provides, and examples of how to use `PairwiseDistance` with /// `torch::nn::PairwiseDistanceOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(PairwiseDistance); } // namespace torch::nn ```
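A minimal sketch of the two distance modules declared above; batch and feature sizes are arbitrary:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;
  CosineSimilarity cos(CosineSimilarityOptions().dim(1));
  PairwiseDistance dist(PairwiseDistanceOptions().p(2));

  torch::Tensor a = torch::randn({4, 16});
  torch::Tensor b = torch::randn({4, 16});
  std::cout << cos(a, b).sizes() << '\n';  // [4]
  std::cout << dist(a, b).sizes() << '\n'; // [4]
}
```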
======================================================================================================================================================= SOURCE CODE FILE: dropout.h LINES: 1 SIZE: 6.43 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\dropout.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/cloneable.h> #include <torch/nn/options/dropout.h> #include <torch/nn/pimpl.h> #include <torch/types.h> #include <torch/csrc/Export.h> namespace torch::nn { namespace detail { template <typename Derived> class _DropoutNd : public torch::nn::Cloneable<Derived> { public: _DropoutNd(double p) : _DropoutNd(DropoutOptions().p(p)) {} explicit _DropoutNd(const DropoutOptions& options_ = {}) : options(options_) { _DropoutNd::reset(); } void reset() override { TORCH_CHECK( options.p() >= 0. && options.p() <= 1., "dropout probability has to be between 0 and 1, but got ", options.p()); } /// The options with which this `Module` was constructed. DropoutOptions options; }; } // namespace detail // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies dropout over a 1-D input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Dropout to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::DropoutOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Dropout model(DropoutOptions().p(0.42).inplace(true)); /// ``` class TORCH_API DropoutImpl : public detail::_DropoutNd<DropoutImpl> { public: using detail::_DropoutNd<DropoutImpl>::_DropoutNd; Tensor forward(Tensor input); /// Pretty prints the `Dropout` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `DropoutImpl`. /// See the documentation for `DropoutImpl` class to learn what methods it /// provides, and examples of how to use `Dropout` with /// `torch::nn::DropoutOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Dropout); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies dropout over a 2-D input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Dropout2d to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::Dropout2dOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Dropout2d model(Dropout2dOptions().p(0.42).inplace(true)); /// ``` class TORCH_API Dropout2dImpl : public detail::_DropoutNd<Dropout2dImpl> { public: using detail::_DropoutNd<Dropout2dImpl>::_DropoutNd; Tensor forward(Tensor input); /// Pretty prints the `Dropout2d` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `Dropout2dImpl`. /// See the documentation for `Dropout2dImpl` class to learn what methods it /// provides, and examples of how to use `Dropout2d` with /// `torch::nn::Dropout2dOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Dropout2d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies dropout over a 3-D input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Dropout3d to learn /// about the exact behavior of this module. 
/// /// See the documentation for `torch::nn::Dropout3dOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Dropout3d model(Dropout3dOptions().p(0.42).inplace(true)); /// ``` class TORCH_API Dropout3dImpl : public detail::_DropoutNd<Dropout3dImpl> { public: using detail::_DropoutNd<Dropout3dImpl>::_DropoutNd; Tensor forward(Tensor input); /// Pretty prints the `Dropout3d` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `Dropout3dImpl`. /// See the documentation for `Dropout3dImpl` class to learn what methods it /// provides, and examples of how to use `Dropout3d` with /// `torch::nn::Dropout3dOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Dropout3d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AlphaDropout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies Alpha Dropout over the input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.AlphaDropout to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::AlphaDropoutOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true)); /// ``` class TORCH_API AlphaDropoutImpl : public detail::_DropoutNd<AlphaDropoutImpl> { public: using detail::_DropoutNd<AlphaDropoutImpl>::_DropoutNd; Tensor forward(const Tensor& input); /// Pretty prints the `AlphaDropout` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `AlphaDropoutImpl`. /// See the documentation for `AlphaDropoutImpl` class to learn what methods it /// provides, and examples of how to use `AlphaDropout` with /// `torch::nn::AlphaDropoutOptions`. See the documentation for `ModuleHolder` /// to learn about PyTorch's module storage semantics. TORCH_MODULE(AlphaDropout); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FeatureAlphaDropout // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies Feature Alpha Dropout over the input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.FeatureAlphaDropout to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::FeatureAlphaDropoutOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true)); /// ``` class TORCH_API FeatureAlphaDropoutImpl : public detail::_DropoutNd<FeatureAlphaDropoutImpl> { public: using detail::_DropoutNd<FeatureAlphaDropoutImpl>::_DropoutNd; Tensor forward(const Tensor& input); /// Pretty prints the `FeatureAlphaDropout` module into the given `stream`. void pretty_print(std::ostream& stream) const override; }; /// A `ModuleHolder` subclass for `FeatureAlphaDropoutImpl`. /// See the documentation for `FeatureAlphaDropoutImpl` class to learn what /// methods it provides, and examples of how to use `FeatureAlphaDropout` with /// `torch::nn::FeatureAlphaDropoutOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(FeatureAlphaDropout); } // namespace torch::nn ```
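All of these modules inherit the training/evaluation switch from `Module`, and `_DropoutNd::reset()` is where the `0 <= p <= 1` check shown earlier fires. A sketch of the train/eval behavior (illustrative values, assuming a libtorch program):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::Dropout drop(torch::nn::DropoutOptions().p(0.5));

  auto x = torch::ones({2, 8});

  drop->train();                    // dropout active: zeroes entries at random
  auto y_train = drop->forward(x);  // survivors scaled by 1 / (1 - p) = 2

  drop->eval();                     // identity in evaluation mode
  auto y_eval = drop->forward(x);   // equal to x

  std::cout << y_train << '\n' << y_eval << '\n';

  // Constructing with p outside [0, 1] trips the TORCH_CHECK in reset():
  // torch::nn::Dropout bad(torch::nn::DropoutOptions().p(1.5));  // throws
}
```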
========================================================================================================================================================= SOURCE CODE FILE: embedding.h LINES: 1 SIZE: 6.07 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\embedding.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/cloneable.h> #include <torch/nn/functional/embedding.h> #include <torch/nn/modules/common.h> #include <torch/nn/options/embedding.h> #include <torch/nn/pimpl.h> #include <torch/types.h> #include <cstddef> namespace torch::nn { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Embedding // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Performs a lookup in a fixed size embedding table. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Embedding to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::EmbeddingOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Embedding model(EmbeddingOptions(10, /// 2).padding_idx(3).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true)); /// ``` class TORCH_API EmbeddingImpl : public torch::nn::Cloneable<EmbeddingImpl> { public: EmbeddingImpl(int64_t num_embeddings, int64_t embedding_dim) : EmbeddingImpl(EmbeddingOptions(num_embeddings, embedding_dim)) {} explicit EmbeddingImpl(EmbeddingOptions options_); void reset() override; void reset_parameters(); /// Pretty prints the `Embedding` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// Performs a lookup on the embedding table stored in `weight` using the /// `indices` supplied and returns the result. Tensor forward(const Tensor& indices); /// The `Options` used to configure this `Embedding` module. /// Changes to `EmbeddingOptions` *after construction* have no effect. EmbeddingOptions options; /// The embedding table. Tensor weight; }; /// A `ModuleHolder` subclass for `EmbeddingImpl`. /// See the documentation for `EmbeddingImpl` class to learn what methods it /// provides, and examples of how to use `Embedding` with /// `torch::nn::EmbeddingOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. class Embedding : public torch::nn::ModuleHolder<EmbeddingImpl> { public: using torch::nn::ModuleHolder<EmbeddingImpl>::ModuleHolder; /// See the documentation for `torch::nn::EmbeddingFromPretrainedOptions` /// class to learn what optional arguments are supported for this function. static Embedding from_pretrained( const torch::Tensor& embeddings, const EmbeddingFromPretrainedOptions& options = {}) { TORCH_CHECK( embeddings.dim() == 2, "Embeddings parameter is expected to be 2-dimensional"); auto rows = embeddings.size(0); auto cols = embeddings.size(1); Embedding embedding(EmbeddingOptions(rows, cols) ._weight(embeddings) .padding_idx(options.padding_idx()) .max_norm(options.max_norm()) .norm_type(options.norm_type()) .scale_grad_by_freq(options.scale_grad_by_freq()) .sparse(options.sparse())); embedding->weight.set_requires_grad(!options.freeze()); return embedding; } }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ EmbeddingBag // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Computes sums or means of 'bags' of embeddings, without instantiating the /// intermediate embeddings. /// See https://pytorch.org/docs/main/nn.html#torch.nn.EmbeddingBag to learn /// about the exact behavior of this module. 
/// /// See the documentation for `torch::nn::EmbeddingBagOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// EmbeddingBag model(EmbeddingBagOptions(10, /// 2).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true).mode(torch::kSum).padding_idx(1)); /// ``` class TORCH_API EmbeddingBagImpl : public torch::nn::Cloneable<EmbeddingBagImpl> { public: EmbeddingBagImpl(int64_t num_embeddings, int64_t embedding_dim) : EmbeddingBagImpl(EmbeddingBagOptions(num_embeddings, embedding_dim)) {} explicit EmbeddingBagImpl(EmbeddingBagOptions options_); void reset() override; void reset_parameters(); /// Pretty prints the `EmbeddingBag` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// The `Options` used to configure this `EmbeddingBag` module. EmbeddingBagOptions options; /// The embedding table. Tensor weight; Tensor forward( const Tensor& input, const Tensor& offsets = {}, const Tensor& per_sample_weights = {}); protected: FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())}, {2, AnyValue(Tensor())}) }; /// A `ModuleHolder` subclass for `EmbeddingBagImpl`. /// See the documentation for `EmbeddingBagImpl` class to learn what methods it /// provides, and examples of how to use `EmbeddingBag` with /// `torch::nn::EmbeddingBagOptions`. See the documentation for `ModuleHolder` /// to learn about PyTorch's module storage semantics. class EmbeddingBag : public torch::nn::ModuleHolder<EmbeddingBagImpl> { public: using torch::nn::ModuleHolder<EmbeddingBagImpl>::ModuleHolder; /// See the documentation for `torch::nn::EmbeddingBagFromPretrainedOptions` /// class to learn what optional arguments are supported for this function. static EmbeddingBag from_pretrained( const torch::Tensor& embeddings, const EmbeddingBagFromPretrainedOptions& options = {}) { TORCH_CHECK( embeddings.dim() == 2, "Embeddings parameter is expected to be 2-dimensional"); auto rows = embeddings.size(0); auto cols = embeddings.size(1); EmbeddingBag embeddingbag( EmbeddingBagOptions(rows, cols) ._weight(embeddings) .max_norm(options.max_norm()) .norm_type(options.norm_type()) .scale_grad_by_freq(options.scale_grad_by_freq()) .mode(options.mode()) .sparse(options.sparse()) .padding_idx(options.padding_idx())); embeddingbag->weight.set_requires_grad(!options.freeze()); return embeddingbag; } }; } // namespace torch::nn ```
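A usage sketch covering the plain lookup, the `from_pretrained` factory defined above, and the `offsets` convention of `EmbeddingBag` (illustrative shapes and values, assuming a libtorch program):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // A 10-entry table of 3-dimensional embeddings.
  torch::nn::Embedding table(torch::nn::EmbeddingOptions(10, 3));
  auto idx = torch::tensor({1, 2, 4, 5}, torch::kLong);
  auto vecs = table->forward(idx);  // shape [4, 3]

  // Frozen table built from an existing 2-D weight matrix; from_pretrained
  // disables gradients on the weight unless .freeze(false) is requested.
  auto weights = torch::randn({10, 3});
  auto frozen = torch::nn::Embedding::from_pretrained(weights);

  // EmbeddingBag pools bags of indices; offsets mark where each bag starts
  // in the flat index list: here bags {1, 2} and {4, 5}.
  torch::nn::EmbeddingBag bag(
      torch::nn::EmbeddingBagOptions(10, 3).mode(torch::kSum));
  auto offsets = torch::tensor({0, 2}, torch::kLong);
  auto pooled = bag->forward(idx, offsets);  // shape [2, 3]

  std::cout << vecs.sizes() << " " << pooled.sizes() << " "
            << frozen->weight.requires_grad() << '\n';
}
```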
==================================================================================================================================================== SOURCE CODE FILE: fold.h LINES: 1 SIZE: 2.84 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\fold.h ENCODING: utf-8 ```h #pragma once #include <torch/expanding_array.h> #include <torch/nn/cloneable.h> #include <torch/nn/functional/fold.h> #include <torch/nn/options/fold.h> #include <torch/nn/pimpl.h> #include <torch/types.h> namespace torch::nn { /// Applies fold over a 3-D input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Fold to learn about /// the exact behavior of this module. /// /// See the documentation for `torch::nn::FoldOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Fold model(FoldOptions({8, 8}, {3, 3}).dilation(2).padding({2, /// 1}).stride(2)); /// ``` class TORCH_API FoldImpl : public torch::nn::Cloneable<FoldImpl> { public: FoldImpl(ExpandingArray<2> output_size, ExpandingArray<2> kernel_size) : FoldImpl(FoldOptions(output_size, kernel_size)) {} explicit FoldImpl(const FoldOptions& options_); void reset() override; /// Pretty prints the `Fold` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input); /// The options with which this `Module` was constructed. FoldOptions options; }; /// A `ModuleHolder` subclass for `FoldImpl`. /// See the documentation for `FoldImpl` class to learn what methods it /// provides, and examples of how to use `Fold` with `torch::nn::FoldOptions`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(Fold); // ============================================================================ /// Applies unfold over a 4-D input. /// See https://pytorch.org/docs/main/nn.html#torch.nn.Unfold to learn about /// the exact behavior of this module. /// /// See the documentation for `torch::nn::UnfoldOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Unfold model(UnfoldOptions({2, 4}).dilation(2).padding({2, 1}).stride(2)); /// ``` class TORCH_API UnfoldImpl : public Cloneable<UnfoldImpl> { public: UnfoldImpl(ExpandingArray<2> kernel_size) : UnfoldImpl(UnfoldOptions(kernel_size)) {} explicit UnfoldImpl(const UnfoldOptions& options_); void reset() override; /// Pretty prints the `Unfold` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input); /// The options with which this `Module` was constructed. UnfoldOptions options; }; /// A `ModuleHolder` subclass for `UnfoldImpl`. /// See the documentation for `UnfoldImpl` class to learn what methods it /// provides, and examples of how to use `Unfold` with /// `torch::nn::UnfoldOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Unfold); } // namespace torch::nn ```
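`Unfold` and `Fold` are inverses when the extracted patches do not overlap, which makes a round trip a convenient sanity check. A sketch (illustrative shapes, assuming a libtorch program):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Extract non-overlapping 2x2 patches from a [1, 3, 4, 4] input:
  // 4 patches, each flattened to 3 * 2 * 2 = 12 values -> [1, 12, 4].
  torch::nn::Unfold unfold(torch::nn::UnfoldOptions({2, 2}).stride(2));
  auto x = torch::randn({1, 3, 4, 4});
  auto patches = unfold->forward(x);

  // Scatter the patches back onto a 4x4 grid; Fold sums overlapping
  // contributions, so with no overlap this reconstructs x exactly.
  torch::nn::Fold fold(torch::nn::FoldOptions({4, 4}, {2, 2}).stride(2));
  auto y = fold->forward(patches);

  std::cout << patches.sizes() << " " << torch::allclose(x, y) << '\n';
}
```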
============================================================================================================================================================ SOURCE CODE FILE: instancenorm.h LINES: 1 SIZE: 5.53 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\instancenorm.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/functional/instancenorm.h> #include <torch/nn/modules/batchnorm.h> #include <torch/nn/options/instancenorm.h> namespace torch::nn { /// Base class for all (dimension-specialized) instance norm modules template <size_t D, typename Derived> // NOLINTNEXTLINE(bugprone-crtp-constructor-accessibility) class InstanceNormImpl : public torch::nn::NormImplBase<D, Derived, InstanceNormOptions> { private: inline Tensor apply_instance_norm(const Tensor& input) { return torch::nn::functional::detail::instance_norm( input, this->running_mean, this->running_var, this->weight, this->bias, this->is_training() || !this->options.track_running_stats(), this->options.momentum(), this->options.eps()); } inline Tensor handle_no_batch_input(const Tensor& input) { return this->apply_instance_norm(input.unsqueeze(0)).squeeze(0); } public: using torch::nn::NormImplBase<D, Derived, InstanceNormOptions>::NormImplBase; Tensor forward(const Tensor& input) { this->_check_input_dim(input); // For InstanceNorm1D, 2D is unbatched and 3D is batched // For InstanceNorm2D, 3D is unbatched and 4D is batched // For InstanceNorm3D, 4D is unbatched and 5D is batched // check if input does not have a batch-dim if (input.dim() == D + 1) { return this->handle_no_batch_input(input); } return this->apply_instance_norm(input); } /// Pretty prints the `InstanceNorm{1,2,3}d` module into the given `stream`. void pretty_print(std::ostream& stream) const override { stream << std::boolalpha << "torch::nn::InstanceNorm" << D << "d(" << this->options.num_features() << ", " << "eps=" << this->options.eps() << ", " << "momentum=" << this->options.momentum() << ", " << "affine=" << this->options.affine() << ", " << "track_running_stats=" << this->options.track_running_stats() << ")"; } }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm1d // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the InstanceNorm1d function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.InstanceNorm1d to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::InstanceNorm1dOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// InstanceNorm1d /// model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); /// ``` class TORCH_API InstanceNorm1dImpl : public InstanceNormImpl<1, InstanceNorm1dImpl> { protected: void _check_input_dim(const Tensor& input) override; public: using InstanceNormImpl<1, InstanceNorm1dImpl>::InstanceNormImpl; }; /// A `ModuleHolder` subclass for `InstanceNorm1dImpl`. /// See the documentation for `InstanceNorm1dImpl` class to learn what methods /// it provides, and examples of how to use `InstanceNorm1d` with /// `torch::nn::InstanceNorm1dOptions`. See the documentation for `ModuleHolder` /// to learn about PyTorch's module storage semantics. TORCH_MODULE(InstanceNorm1d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm2d // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the InstanceNorm2d function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.InstanceNorm2d to learn /// about the exact behavior of this module. 
/// /// See the documentation for `torch::nn::InstanceNorm2dOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// InstanceNorm2d /// model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); /// ``` class TORCH_API InstanceNorm2dImpl : public InstanceNormImpl<2, InstanceNorm2dImpl> { protected: void _check_input_dim(const Tensor& input) override; public: using InstanceNormImpl<2, InstanceNorm2dImpl>::InstanceNormImpl; }; /// A `ModuleHolder` subclass for `InstanceNorm2dImpl`. /// See the documentation for `InstanceNorm2dImpl` class to learn what methods /// it provides, and examples of how to use `InstanceNorm2d` with /// `torch::nn::InstanceNorm2dOptions`. See the documentation for `ModuleHolder` /// to learn about PyTorch's module storage semantics. TORCH_MODULE(InstanceNorm2d); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm3d // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies the InstanceNorm3d function. /// See https://pytorch.org/docs/main/nn.html#torch.nn.InstanceNorm3d to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::InstanceNorm3dOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// InstanceNorm3d /// model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); /// ``` class TORCH_API InstanceNorm3dImpl : public InstanceNormImpl<3, InstanceNorm3dImpl> { protected: void _check_input_dim(const Tensor& input) override; public: using InstanceNormImpl<3, InstanceNorm3dImpl>::InstanceNormImpl; }; /// A `ModuleHolder` subclass for `InstanceNorm3dImpl`. /// See the documentation for `InstanceNorm3dImpl` class to learn what methods /// it provides, and examples of how to use `InstanceNorm3d` with /// `torch::nn::InstanceNorm3dOptions`. See the documentation for `ModuleHolder` /// to learn about PyTorch's module storage semantics. TORCH_MODULE(InstanceNorm3d); } // namespace torch::nn ```
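The base-class `forward` above shows the unbatched path: an input with `D + 1` dimensions is unsqueezed, normalized, and squeezed again via `handle_no_batch_input`. A brief sketch (illustrative shapes, assuming a libtorch program):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::InstanceNorm2d norm(torch::nn::InstanceNorm2dOptions(3)
                                     .affine(true)
                                     .track_running_stats(false));

  // Batched input [N, C, H, W]: statistics computed per sample, per channel.
  auto batched = torch::randn({8, 3, 16, 16});
  auto out = norm->forward(batched);

  // Unbatched input [C, H, W]: dim == D + 1, so the module unsqueezes a
  // batch dimension internally and squeezes it away afterwards.
  auto single = torch::randn({3, 16, 16});
  auto out_single = norm->forward(single);

  std::cout << out.sizes() << " " << out_single.sizes() << '\n';
}
```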
====================================================================================================================================================== SOURCE CODE FILE: linear.h LINES: 1 SIZE: 7.52 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\linear.h ENCODING: utf-8 ```h #pragma once #include <torch/nn/cloneable.h> #include <torch/nn/functional/linear.h> #include <torch/nn/module.h> #include <torch/nn/options/linear.h> #include <torch/nn/pimpl.h> #include <torch/types.h> #include <cstddef> #include <utility> #include <vector> namespace torch::nn { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Identity ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// A placeholder identity operator that is argument-insensitive. /// See https://pytorch.org/docs/main/generated/torch.nn.Identity.html to /// learn about the exact behavior of this module. class TORCH_API IdentityImpl : public Cloneable<IdentityImpl> { public: void reset() override; /// Pretty prints the `Identity` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input); }; /// A `ModuleHolder` subclass for `IdentityImpl`. /// See the documentation for `IdentityImpl` class to learn what methods it /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(Identity); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Linear ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies a linear transformation with optional bias. /// See https://pytorch.org/docs/main/generated/torch.nn.Linear.html to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::LinearOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Linear model(LinearOptions(5, 2).bias(false)); /// ``` class TORCH_API LinearImpl : public Cloneable<LinearImpl> { public: LinearImpl(int64_t in_features, int64_t out_features) : LinearImpl(LinearOptions(in_features, out_features)) {} explicit LinearImpl(const LinearOptions& options_); void reset() override; void reset_parameters(); /// Pretty prints the `Linear` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// Transforms the `input` tensor by multiplying with the `weight` and /// optionally adding the `bias`, if `bias` is true in the options. Tensor forward(const Tensor& input); /// The options used to configure this module. LinearOptions options; /// The learned weight. Tensor weight; /// The learned bias. If `bias` is false in the `options`, this tensor is /// undefined. Tensor bias; }; /// A `ModuleHolder` subclass for `LinearImpl`. /// See the documentation for `LinearImpl` class to learn what methods it /// provides, and examples of how to use `Linear` with /// `torch::nn::LinearOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Linear); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Flatten ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// A placeholder for the Flatten operator. /// See https://pytorch.org/docs/main/generated/torch.nn.Flatten.html to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::FlattenOptions` class to learn what /// constructor arguments are supported for this module. 
/// /// Example: /// ``` /// Flatten model(FlattenOptions().start_dim(2).end_dim(4)); /// ``` class TORCH_API FlattenImpl : public Cloneable<FlattenImpl> { public: explicit FlattenImpl(const FlattenOptions& options_ = {}); void reset() override; /// Pretty prints the `Flatten` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// Applies a flatten transform on the `input`. Tensor forward(const Tensor& input); /// The options used to configure this module. FlattenOptions options; }; /// A `ModuleHolder` subclass for `FlattenImpl`. /// See the documentation for `FlattenImpl` class to learn what methods it /// provides, and examples of how to use `Flatten` with /// `torch::nn::FlattenOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Flatten); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Unflatten // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// A placeholder for the Unflatten operator. /// See https://pytorch.org/docs/main/generated/torch.nn.Unflatten.html to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::UnflattenOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Unflatten model(UnflattenOptions(0, {2, 2})); /// Unflatten model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}})); /// ``` class TORCH_API UnflattenImpl : public Cloneable<UnflattenImpl> { public: UnflattenImpl(int64_t dim, std::vector<int64_t> sizes) : UnflattenImpl(UnflattenOptions(dim, std::move(sizes))) {} UnflattenImpl(std::string dimname, UnflattenOptions::namedshape_t namedshape) : UnflattenImpl( UnflattenOptions(std::move(dimname), std::move(namedshape))) {} explicit UnflattenImpl(UnflattenOptions options_); void reset() override; /// Pretty prints the `Unflatten` module into the given `stream`. void pretty_print(std::ostream& stream) const override; /// Applies an unflatten transform on the `input`. Tensor forward(const Tensor& input); /// The options used to configure this module. UnflattenOptions options; }; /// A `ModuleHolder` subclass for `UnflattenImpl`. /// See the documentation for `UnflattenImpl` class to learn what methods it /// provides, and examples of how to use `Unflatten` with /// `torch::nn::UnflattenOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Unflatten); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Bilinear ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Applies a bilinear transformation with optional bias. /// See https://pytorch.org/docs/main/generated/torch.nn.Bilinear.html to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::BilinearOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// Bilinear model(BilinearOptions(3, 2, 4).bias(false)); /// ``` class TORCH_API BilinearImpl : public Cloneable<BilinearImpl> { public: BilinearImpl(int64_t in1_features, int64_t in2_features, int64_t out_features) : BilinearImpl( BilinearOptions(in1_features, in2_features, out_features)) {} explicit BilinearImpl(const BilinearOptions& options_); void reset() override; void reset_parameters(); /// Pretty prints the `Bilinear` module into the given `stream`. 
void pretty_print(std::ostream& stream) const override; /// Applies a bilinear transform on the `input1` and `input2` tensors by /// multiplying with the `weight` and optionally adding the `bias`, if /// `bias` is true in the options. Tensor forward(const Tensor& input1, const Tensor& input2); /// The options used to configure this module. BilinearOptions options; /// The learned weight. Tensor weight; /// The learned bias. If `bias` is false in the `options`, this tensor is /// undefined. Tensor bias; }; /// A `ModuleHolder` subclass for `BilinearImpl`. /// See the documentation for `BilinearImpl` class to learn what methods it /// provides, and examples of how to use `Bilinear` with /// `torch::nn::BilinearOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(Bilinear); } // namespace torch::nn ```
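A combined sketch of the modules in this header (illustrative shapes, assuming a libtorch program):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Linear: 5 input features -> 2 output features, no bias term.
  torch::nn::Linear linear(torch::nn::LinearOptions(5, 2).bias(false));
  auto y = linear->forward(torch::randn({3, 5}));  // shape [3, 2]

  // Flatten collapses dims 1..-1 by default: [3, 2, 4] -> [3, 8].
  torch::nn::Flatten flatten;
  auto flat = flatten->forward(torch::randn({3, 2, 4}));

  // Unflatten is the inverse: split dim 1 of [3, 8] back into {2, 4}.
  torch::nn::Unflatten unflatten(torch::nn::UnflattenOptions(1, {2, 4}));
  auto unflat = unflatten->forward(flat);  // shape [3, 2, 4]

  // Bilinear: y = x1^T A x2 + b over paired inputs of 3 and 4 features.
  torch::nn::Bilinear bilinear(torch::nn::BilinearOptions(3, 4, 2));
  auto z = bilinear->forward(torch::randn({7, 3}), torch::randn({7, 4}));

  std::cout << y.sizes() << " " << flat.sizes() << " " << unflat.sizes()
            << " " << z.sizes() << '\n';
}
```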
==================================================================================================================================================== SOURCE CODE FILE: loss.h LINES: 1 SIZE: 31.04 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\loss.h ENCODING: utf-8 ```h #pragma once #include <torch/expanding_array.h> #include <torch/nn/cloneable.h> #include <torch/nn/functional/loss.h> #include <torch/nn/options/loss.h> #include <torch/nn/pimpl.h> #include <torch/types.h> #include <torch/csrc/Export.h> #include <cstddef> #include <vector> namespace torch::nn { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ L1Loss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that measures the mean absolute error (MAE) between each /// element in the input :math:`x` and target :math:`y`. /// See https://pytorch.org/docs/main/nn.html#torch.nn.L1Loss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::L1LossOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// L1Loss model(L1LossOptions(torch::kNone)); /// ``` struct TORCH_API L1LossImpl : Cloneable<L1LossImpl> { explicit L1LossImpl(L1LossOptions options_ = {}); void reset() override; /// Pretty prints the `L1Loss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. L1LossOptions options; }; /// A `ModuleHolder` subclass for `L1LossImpl`. /// See the documentation for `L1LossImpl` class to learn what methods it /// provides, and examples of how to use `L1Loss` with /// `torch::nn::L1LossOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(L1Loss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ KLDivLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// The Kullback-Leibler divergence loss measure. /// See https://pytorch.org/docs/main/nn.html#torch.nn.KLDivLoss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::KLDivLossOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// KLDivLoss model(KLDivLossOptions().reduction(torch::kNone)); /// ``` struct TORCH_API KLDivLossImpl : Cloneable<KLDivLossImpl> { explicit KLDivLossImpl(KLDivLossOptions options_ = {}); void reset() override; /// Pretty prints the `KLDivLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. KLDivLossOptions options; }; /// A `ModuleHolder` subclass for `KLDivLossImpl`. /// See the documentation for `KLDivLossImpl` class to learn what methods it /// provides, and examples of how to use `KLDivLoss` with /// `torch::nn::KLDivLossOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(KLDivLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MSELoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that measures the mean squared error (squared L2 norm) /// between each element in the input :math:`x` and target :math:`y`. /// See https://pytorch.org/docs/main/nn.html#torch.nn.MSELoss to learn /// about the exact behavior of this module. 
/// /// See the documentation for `torch::nn::MSELossOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// MSELoss model(MSELossOptions(torch::kNone)); /// ``` struct TORCH_API MSELossImpl : Cloneable<MSELossImpl> { explicit MSELossImpl(MSELossOptions options_ = {}); void reset() override; /// Pretty prints the `MSELoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. MSELossOptions options; }; /// A `ModuleHolder` subclass for `MSELossImpl`. /// See the documentation for `MSELossImpl` class to learn what methods it /// provides, and examples of how to use `MSELoss` with /// `torch::nn::MSELossOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(MSELoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BCELoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that measures the Binary Cross Entropy /// between the target and the output. /// See https://pytorch.org/docs/main/nn.html#torch.nn.BCELoss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::BCELossOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight)); /// ``` struct TORCH_API BCELossImpl : Cloneable<BCELossImpl> { explicit BCELossImpl(BCELossOptions options_ = {}); void reset() override; /// Pretty prints the `BCELoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. BCELossOptions options; }; /// A `ModuleHolder` subclass for `BCELossImpl`. /// See the documentation for `BCELossImpl` class to learn what methods it /// provides, and examples of how to use `BCELoss` with /// `torch::nn::BCELossOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(BCELoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HingeEmbeddingLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that measures the loss given an input tensor :math:`x` /// and a label tensor :math:`y` (containing 1 or -1). /// See https://pytorch.org/docs/main/nn.html#torch.nn.HingeEmbeddingLoss to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// HingeEmbeddingLoss /// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone)); /// ``` struct TORCH_API HingeEmbeddingLossImpl : Cloneable<HingeEmbeddingLossImpl> { explicit HingeEmbeddingLossImpl(HingeEmbeddingLossOptions options_ = {}); void reset() override; /// Pretty prints the `HingeEmbeddingLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. HingeEmbeddingLossOptions options; }; /// A `ModuleHolder` subclass for `HingeEmbeddingLossImpl`. 
/// See the documentation for `HingeEmbeddingLossImpl` class to learn what /// methods it provides, and examples of how to use `HingeEmbeddingLoss` with /// `torch::nn::HingeEmbeddingLossOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(HingeEmbeddingLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiMarginLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that optimizes a multi-class classification hinge /// loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) /// and output :math:`y` (which is a 1D tensor of target class indices, :math:`0 /// \leq y \leq \text{x.size}(1)-1`). See /// https://pytorch.org/docs/main/nn.html#torch.nn.MultiMarginLoss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight)); /// ``` struct TORCH_API MultiMarginLossImpl : public Cloneable<MultiMarginLossImpl> { explicit MultiMarginLossImpl(MultiMarginLossOptions options_ = {}); void reset() override; /// Pretty prints the `MultiMarginLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. MultiMarginLossOptions options; }; /// A `ModuleHolder` subclass for `MultiMarginLossImpl`. /// See the documentation for `MultiMarginLossImpl` class to learn what methods /// it provides, and examples of how to use `MultiMarginLoss` with /// `torch::nn::MultiMarginLossOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(MultiMarginLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CosineEmbeddingLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that measures the loss given input tensors /// `input1`, `input2`, and a `Tensor` label `target` with values 1 or /// -1. This is used for measuring whether two inputs are similar or /// dissimilar, using the cosine distance, and is typically used for learning /// nonlinear embeddings or semi-supervised learning. /// See https://pytorch.org/docs/main/nn.html#torch.nn.CosineEmbeddingLoss to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5)); /// ``` struct TORCH_API CosineEmbeddingLossImpl : public Cloneable<CosineEmbeddingLossImpl> { explicit CosineEmbeddingLossImpl(CosineEmbeddingLossOptions options_ = {}); void reset() override; /// Pretty prints the `CosineEmbeddingLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward( const Tensor& input1, const Tensor& input2, const Tensor& target); /// The options with which this `Module` was constructed. CosineEmbeddingLossOptions options; }; /// A `ModuleHolder` subclass for `CosineEmbeddingLossImpl`. /// See the documentation for `CosineEmbeddingLossImpl` class to learn what /// methods it provides, and examples of how to use `CosineEmbeddingLoss` with /// `torch::nn::CosineEmbeddingLossOptions`. 
See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(CosineEmbeddingLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SmoothL1Loss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that uses a squared term if the absolute /// element-wise error falls below beta and an L1 term otherwise. /// It is less sensitive to outliers than the `MSELoss` and in some cases /// prevents exploding gradients (e.g. see the paper `Fast R-CNN` by Ross /// Girshick). See https://pytorch.org/docs/main/nn.html#torch.nn.SmoothL1Loss /// to learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5)); /// ``` struct TORCH_API SmoothL1LossImpl : public Cloneable<SmoothL1LossImpl> { explicit SmoothL1LossImpl(SmoothL1LossOptions options = {}); void reset() override; /// Pretty prints the `SmoothL1Loss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. SmoothL1LossOptions options; }; /// A `ModuleHolder` subclass for `SmoothL1LossImpl`. /// See the documentation for `SmoothL1LossImpl` class to learn what methods it /// provides, and examples of how to use `SmoothL1Loss` with /// `torch::nn::SmoothL1LossOptions`. See the documentation for `ModuleHolder` /// to learn about PyTorch's module storage semantics. TORCH_MODULE(SmoothL1Loss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HuberLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that uses a squared term if the absolute /// element-wise error falls below delta and a delta-scaled L1 term otherwise. /// See https://pytorch.org/docs/main/nn.html#torch.nn.HuberLoss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::HuberLossOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5)); /// ``` struct TORCH_API HuberLossImpl : public Cloneable<HuberLossImpl> { explicit HuberLossImpl(HuberLossOptions options_ = {}); void reset() override; /// Pretty prints the `HuberLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. HuberLossOptions options; }; /// A `ModuleHolder` subclass for `HuberLossImpl`. /// See the documentation for `HuberLossImpl` class to learn what methods it /// provides, and examples of how to use `HuberLoss` with /// `torch::nn::HuberLossOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(HuberLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiLabelMarginLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that optimizes a multi-class multi-classification /// hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch /// `Tensor`) and output :math:`y` (which is a 2D `Tensor` of target class /// indices). See /// https://pytorch.org/docs/main/nn.html#torch.nn.MultiLabelMarginLoss to /// learn about the exact behavior of this module. 
/// /// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone)); /// ``` struct TORCH_API MultiLabelMarginLossImpl : public Cloneable<MultiLabelMarginLossImpl> { explicit MultiLabelMarginLossImpl(MultiLabelMarginLossOptions options_ = {}); void reset() override; /// Pretty prints the `MultiLabelMarginLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. MultiLabelMarginLossOptions options; }; /// A `ModuleHolder` subclass for `MultiLabelMarginLossImpl`. /// See the documentation for `MultiLabelMarginLossImpl` class to learn what /// methods it provides, and examples of how to use `MultiLabelMarginLoss` with /// `torch::nn::MultiLabelMarginLossOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(MultiLabelMarginLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SoftMarginLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that optimizes a two-class classification /// logistic loss between input tensor :math:`x` and target tensor :math:`y` /// (containing 1 or -1). /// See https://pytorch.org/docs/main/nn.html#torch.nn.SoftMarginLoss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone)); /// ``` struct TORCH_API SoftMarginLossImpl : public Cloneable<SoftMarginLossImpl> { explicit SoftMarginLossImpl(SoftMarginLossOptions options_ = {}); /// Pretty prints the `SoftMarginLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; void reset() override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. SoftMarginLossOptions options; }; /// A `ModuleHolder` subclass for `SoftMarginLossImpl`. /// See the documentation for `SoftMarginLossImpl` class to learn what methods /// it provides, and examples of how to use `SoftMarginLoss` with /// `torch::nn::SoftMarginLossOptions`. See the documentation for `ModuleHolder` /// to learn about PyTorch's module storage semantics. TORCH_MODULE(SoftMarginLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiLabelSoftMarginLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that optimizes a multi-label one-versus-all /// loss based on max-entropy, between input :math:`x` and target :math:`y` of /// size :math:`(N, C)`. See /// https://pytorch.org/docs/main/nn.html#torch.nn.MultiLabelSoftMarginLoss to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class /// to learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// MultiLabelSoftMarginLoss /// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight)); /// ``` struct TORCH_API MultiLabelSoftMarginLossImpl : public Cloneable<MultiLabelSoftMarginLossImpl> { explicit MultiLabelSoftMarginLossImpl( MultiLabelSoftMarginLossOptions options_ = {}); /// Pretty prints the `MultiLabelSoftMarginLoss` module into the given /// `stream`. 
void pretty_print(std::ostream& stream) const override; void reset() override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. MultiLabelSoftMarginLossOptions options; }; /// A `ModuleHolder` subclass for `MultiLabelSoftMarginLossImpl`. /// See the documentation for `MultiLabelSoftMarginLossImpl` class to learn what /// methods it provides, and examples of how to use `MultiLabelSoftMarginLoss` /// with `torch::nn::MultiLabelSoftMarginLossOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(MultiLabelSoftMarginLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TripletMarginLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that measures the triplet loss given input /// tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater /// than :math:`0`. This is used for measuring a relative similarity between /// samples. A triplet is composed of `a`, `p` and `n` (i.e., `anchor`, /// `positive examples` and `negative examples` respectively). The /// shapes of all input tensors should be :math:`(N, D)`. /// See https://pytorch.org/docs/main/nn.html#torch.nn.TripletMarginLoss to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::TripletMarginLossOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// TripletMarginLoss /// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false)); /// ``` struct TORCH_API TripletMarginLossImpl : public Cloneable<TripletMarginLossImpl> { explicit TripletMarginLossImpl(TripletMarginLossOptions options_ = {}); void reset() override; /// Pretty prints the `TripletMarginLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward( const Tensor& anchor, const Tensor& positive, const Tensor& negative); /// The options with which this `Module` was constructed. TripletMarginLossOptions options; }; /// A `ModuleHolder` subclass for `TripletMarginLossImpl`. /// See the documentation for `TripletMarginLossImpl` class to learn what /// methods it provides, and examples of how to use `TripletMarginLoss` with /// `torch::nn::TripletMarginLossOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(TripletMarginLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TripletMarginWithDistanceLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that measures the triplet loss given input /// tensors :math:`a`, :math:`p`, and :math:`n` (representing anchor, /// positive, and negative examples, respectively); and a nonnegative, /// real-valued function /// ("distance function") used to compute the relationships between the anchor /// and positive example ("positive distance") and the anchor and negative /// example ("negative distance"). /// See /// https://pytorch.org/docs/main/nn.html#torch.nn.TripletMarginWithDistanceLoss /// to learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions` /// class to learn what constructor arguments are supported for this module. 
/// /// Example: /// ``` /// TripletMarginWithDistanceLoss /// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false)); /// ``` struct TORCH_API TripletMarginWithDistanceLossImpl : public Cloneable<TripletMarginWithDistanceLossImpl> { explicit TripletMarginWithDistanceLossImpl( TripletMarginWithDistanceLossOptions options_ = {}); void reset() override; /// Pretty prints the `TripletMarginWithDistanceLoss` module into the given /// `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward( const Tensor& anchor, const Tensor& positive, const Tensor& negative); /// The options with which this `Module` was constructed. TripletMarginWithDistanceLossOptions options; }; /// A `ModuleHolder` subclass for `TripletMarginWithDistanceLossImpl`. /// See the documentation for `TripletMarginWithDistanceLossImpl` class to learn /// what methods it provides, and examples of how to use /// `TripletMarginWithDistanceLoss` with /// `torch::nn::TripletMarginWithDistanceLossOptions`. /// See the documentation for `ModuleHolder` to learn about PyTorch's /// module storage semantics. TORCH_MODULE(TripletMarginWithDistanceLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CTCLoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// The Connectionist Temporal Classification loss. /// See https://pytorch.org/docs/main/nn.html#torch.nn.CTCLoss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::CTCLossOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// CTCLoss /// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum)); /// ``` struct TORCH_API CTCLossImpl : public Cloneable<CTCLossImpl> { explicit CTCLossImpl(CTCLossOptions options_ = {}); void reset() override; /// Pretty prints the `CTCLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward( const Tensor& log_probs, const Tensor& targets, const Tensor& input_lengths, const Tensor& target_lengths); /// The options with which this `Module` was constructed. CTCLossOptions options; }; /// A `ModuleHolder` subclass for `CTCLossImpl`. /// See the documentation for `CTCLossImpl` class to learn what methods it /// provides, and examples of how to use `CTCLoss` with /// `torch::nn::CTCLossOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(CTCLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PoissonNLLLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Negative log likelihood loss with Poisson distribution of target. /// See https://pytorch.org/docs/main/nn.html#torch.nn.PoissonNLLLoss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn /// what constructor arguments are supported for this module. /// /// Example: /// ``` /// PoissonNLLLoss /// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum)); /// ``` struct TORCH_API PoissonNLLLossImpl : public Cloneable<PoissonNLLLossImpl> { explicit PoissonNLLLossImpl(PoissonNLLLossOptions options_ = {}); void reset() override; /// Pretty prints the `PoissonNLLLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& log_input, const Tensor& targets); /// The options with which this `Module` was constructed. 
PoissonNLLLossOptions options; }; /// A `ModuleHolder` subclass for `PoissonNLLLossImpl`. /// See the documentation for `PoissonNLLLossImpl` class to learn what methods /// it provides, and examples of how to use `PoissonNLLLoss` with /// `torch::nn::PoissonNLLLossOptions`. See the documentation for `ModuleHolder` /// to learn about PyTorch's module storage semantics. TORCH_MODULE(PoissonNLLLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MarginRankingLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that measures the loss given /// inputs :math:`x1`, :math:`x2`, two 1D mini-batch `Tensors`, /// and a label 1D mini-batch tensor :math:`y` (containing 1 or -1). /// See https://pytorch.org/docs/main/nn.html#torch.nn.MarginRankingLoss to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::MarginRankingLossOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// MarginRankingLoss /// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum)); /// ``` struct TORCH_API MarginRankingLossImpl : public Cloneable<MarginRankingLossImpl> { explicit MarginRankingLossImpl(MarginRankingLossOptions options_ = {}); void reset() override; /// Pretty prints the `MarginRankingLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward( const Tensor& input1, const Tensor& input2, const Tensor& targets); /// The options with which this `Module` was constructed. MarginRankingLossOptions options; }; /// A `ModuleHolder` subclass for `MarginRankingLossImpl`. /// See the documentation for `MarginRankingLossImpl` class to learn what /// methods it provides, and examples of how to use `MarginRankingLoss` with /// `torch::nn::MarginRankingLossOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(MarginRankingLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NLLLoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// The negative log likelihood loss. It is useful to train a classification /// problem with `C` classes. /// See https://pytorch.org/docs/main/nn.html#torch.nn.NLLLoss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::NLLLossOptions` class to learn what /// constructor arguments are supported for this module. /// /// Example: /// ``` /// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean)); /// ``` struct TORCH_API NLLLossImpl : public Cloneable<NLLLossImpl> { explicit NLLLossImpl(NLLLossOptions options_ = {}); /// Pretty prints the `NLLLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; void reset() override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. NLLLossOptions options; /// A manual rescaling weight given to each class. Tensor weight; }; /// A `ModuleHolder` subclass for `NLLLossImpl`. /// See the documentation for `NLLLossImpl` class to learn what methods it /// provides, and examples of how to use `NLLLoss` with /// `torch::nn::NLLLossOptions`. See the documentation for `ModuleHolder` to /// learn about PyTorch's module storage semantics. TORCH_MODULE(NLLLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CrossEntropyLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// Creates a criterion that computes cross entropy loss between input and /// target. 
See /// https://pytorch.org/docs/main/nn.html#torch.nn.CrossEntropyLoss to learn /// about the exact behavior of this module. /// /// See the documentation for `torch::nn::CrossEntropyLossOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// CrossEntropyLoss /// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean)); /// ``` struct TORCH_API CrossEntropyLossImpl : public Cloneable<CrossEntropyLossImpl> { explicit CrossEntropyLossImpl(CrossEntropyLossOptions options_ = {}); void reset() override; /// Pretty prints the `CrossEntropyLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. CrossEntropyLossOptions options; /// A manual rescaling weight given to each class. Tensor weight; }; /// A `ModuleHolder` subclass for `CrossEntropyLossImpl`. /// See the documentation for `CrossEntropyLossImpl` class to learn what methods /// it provides, and examples of how to use `CrossEntropyLoss` with /// `torch::nn::CrossEntropyLossOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(CrossEntropyLoss); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BCEWithLogitsLoss // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// This loss combines a `Sigmoid` layer and the `BCELoss` in one single /// class. This version is more numerically stable than using a plain `Sigmoid` /// followed by a `BCELoss` as, by combining the operations into one layer, /// we take advantage of the log-sum-exp trick for numerical stability. /// See https://pytorch.org/docs/main/nn.html#torch.nn.BCEWithLogitsLoss to /// learn about the exact behavior of this module. /// /// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to /// learn what constructor arguments are supported for this module. /// /// Example: /// ``` /// BCEWithLogitsLoss /// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight)); /// ``` struct TORCH_API BCEWithLogitsLossImpl : public Cloneable<BCEWithLogitsLossImpl> { explicit BCEWithLogitsLossImpl(BCEWithLogitsLossOptions options_ = {}); void reset() override; /// Pretty prints the `BCEWithLogitsLoss` module into the given `stream`. void pretty_print(std::ostream& stream) const override; Tensor forward(const Tensor& input, const Tensor& target); /// The options with which this `Module` was constructed. BCEWithLogitsLossOptions options; /// A manual rescaling weight given to the loss of each batch element. Tensor weight; /// A weight of positive examples. Tensor pos_weight; }; /// A `ModuleHolder` subclass for `BCEWithLogitsLossImpl`. /// See the documentation for `BCEWithLogitsLossImpl` class to learn what /// methods it provides, and examples of how to use `BCEWithLogitsLoss` with /// `torch::nn::BCEWithLogitsLossOptions`. See the documentation for /// `ModuleHolder` to learn about PyTorch's module storage semantics. TORCH_MODULE(BCEWithLogitsLoss); } // namespace torch::nn ```
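All of these criteria follow the same pattern: construct with options, call `forward(...)`, and call `backward()` on the resulting scalar. A sketch using `CrossEntropyLoss` (illustrative batch of 4 samples over 3 classes, assuming a libtorch program):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::CrossEntropyLoss criterion(
      torch::nn::CrossEntropyLossOptions().reduction(torch::kMean));

  // Raw (unnormalized) scores; the criterion applies log-softmax internally.
  auto logits = torch::randn({4, 3}, torch::requires_grad());
  auto target = torch::tensor({0, 2, 1, 2}, torch::kLong);

  auto loss = criterion->forward(logits, target);
  loss.backward();  // gradients accumulate into logits.grad()

  std::cout << loss.item<float>() << " " << logits.grad().sizes() << '\n';
}
```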
============================================================================================================================================================= SOURCE CODE FILE: normalization.h LINES: 1 SIZE: 6.96 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\normalization.h ENCODING: utf-8
```h
#pragma once

#include <torch/nn/cloneable.h>
#include <torch/nn/functional/normalization.h>
#include <torch/nn/modules/_functions.h>
#include <torch/nn/options/normalization.h>
#include <torch/nn/pimpl.h>
#include <torch/types.h>

#include <cstddef>
#include <utility>
#include <vector>

namespace torch::nn {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LayerNorm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies Layer Normalization over a mini-batch of inputs as described in
/// the paper `Layer Normalization`_ .
/// See https://pytorch.org/docs/main/nn.html#torch.nn.LayerNorm to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::LayerNormOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// LayerNorm model(LayerNormOptions({2,
/// 2}).elementwise_affine(false).eps(2e-5));
/// ```
class TORCH_API LayerNormImpl : public torch::nn::Cloneable<LayerNormImpl> {
 public:
  LayerNormImpl(std::vector<int64_t> normalized_shape)
      : LayerNormImpl(LayerNormOptions(std::move(normalized_shape))) {}
  explicit LayerNormImpl(LayerNormOptions options_);

  void reset() override;

  void reset_parameters();

  /// Pretty prints the `LayerNorm` module into the given `stream`.
  void pretty_print(std::ostream& stream) const override;

  /// Applies layer normalization over a mini-batch of inputs as described in
  /// the paper `Layer Normalization`_ .
  ///
  /// The mean and standard deviation are calculated separately over the
  /// trailing dimensions of the input, which must match the shape specified
  /// by `normalized_shape`.
  ///
  /// `Layer Normalization`: https://arxiv.org/abs/1607.06450
  Tensor forward(const Tensor& input);

  /// The options with which this module was constructed.
  LayerNormOptions options;

  /// The learned weight.
  /// Initialized to ones if the `elementwise_affine` option is set to `true`
  /// upon construction.
  Tensor weight;

  /// The learned bias.
  /// Initialized to zeros if the `elementwise_affine` option is set to `true`
  /// upon construction.
  Tensor bias;
};

/// A `ModuleHolder` subclass for `LayerNormImpl`.
/// See the documentation for `LayerNormImpl` class to learn what methods it
/// provides, and examples of how to use `LayerNorm` with
/// `torch::nn::LayerNormOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(LayerNorm);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LocalResponseNorm
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies local response normalization over an input signal composed
/// of several input planes, where channels occupy the second dimension.
/// Normalization is applied across channels.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.LocalResponseNorm to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::LocalResponseNormOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// LocalResponseNorm
/// model(LocalResponseNormOptions(2).alpha(0.0002).beta(0.85).k(2.));
/// ```
class TORCH_API LocalResponseNormImpl
    : public Cloneable<LocalResponseNormImpl> {
 public:
  LocalResponseNormImpl(int64_t size)
      : LocalResponseNormImpl(LocalResponseNormOptions(size)) {}
  explicit LocalResponseNormImpl(const LocalResponseNormOptions& options_);

  Tensor forward(const Tensor& input);

  void reset() override;

  /// Pretty prints the `LocalResponseNorm` module into the given `stream`.
  void pretty_print(std::ostream& stream) const override;

  /// The options with which this `Module` was constructed.
  LocalResponseNormOptions options;
};

/// A `ModuleHolder` subclass for `LocalResponseNormImpl`.
/// See the documentation for `LocalResponseNormImpl` class to learn what
/// methods it provides, and examples of how to use `LocalResponseNorm` with
/// `torch::nn::LocalResponseNormOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(LocalResponseNorm);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CrossMapLRN2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// See the documentation for `torch::nn::CrossMapLRN2dOptions` class to learn
/// what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// CrossMapLRN2d model(CrossMapLRN2dOptions(3).alpha(1e-5).beta(0.1).k(10));
/// ```
class TORCH_API CrossMapLRN2dImpl
    : public torch::nn::Cloneable<CrossMapLRN2dImpl> {
 public:
  CrossMapLRN2dImpl(int64_t size)
      : CrossMapLRN2dImpl(CrossMapLRN2dOptions(size)) {}
  explicit CrossMapLRN2dImpl(const CrossMapLRN2dOptions& options_)
      : options(options_) {}

  void reset() override;

  /// Pretty prints the `CrossMapLRN2d` module into the given `stream`.
  void pretty_print(std::ostream& stream) const override;

  torch::Tensor forward(const torch::Tensor& input);

  CrossMapLRN2dOptions options;
};

/// A `ModuleHolder` subclass for `CrossMapLRN2dImpl`.
/// See the documentation for `CrossMapLRN2dImpl` class to learn what methods
/// it provides, and examples of how to use `CrossMapLRN2d` with
/// `torch::nn::CrossMapLRN2dOptions`. See the documentation for `ModuleHolder`
/// to learn about PyTorch's module storage semantics.
TORCH_MODULE(CrossMapLRN2d);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GroupNorm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies Group Normalization over a mini-batch of inputs as described in
/// the paper `Group Normalization`_ .
/// See https://pytorch.org/docs/main/nn.html#torch.nn.GroupNorm to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::GroupNormOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// GroupNorm model(GroupNormOptions(2, 2).eps(2e-5).affine(false));
/// ```
class TORCH_API GroupNormImpl : public torch::nn::Cloneable<GroupNormImpl> {
 public:
  GroupNormImpl(int64_t num_groups, int64_t num_channels)
      : GroupNormImpl(GroupNormOptions(num_groups, num_channels)) {}
  explicit GroupNormImpl(const GroupNormOptions& options_);

  void reset() override;

  void reset_parameters();

  /// Pretty prints the `GroupNorm` module into the given `stream`.
  void pretty_print(std::ostream& stream) const override;

  Tensor forward(const Tensor& input);

  /// The options with which this module was constructed.
  GroupNormOptions options;

  /// The learned weight.
  Tensor weight;

  /// The learned bias.
  Tensor bias;
};

/// A `ModuleHolder` subclass for `GroupNormImpl`.
/// See the documentation for `GroupNormImpl` class to learn what methods it
/// provides, and examples of how to use `GroupNorm` with
/// `torch::nn::GroupNormOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(GroupNorm);

} // namespace torch::nn
```
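As a usage illustration for the normalization modules above (again an editorial sketch assuming a standard LibTorch setup, not part of the header), note that each module preserves the input shape and differs only in which dimensions it normalizes over:

```cpp
// Illustrative usage sketch for the normalization modules above.
#include <torch/torch.h>
#include <iostream>

int main() {
  // LayerNorm normalizes over the trailing dimensions given by
  // normalized_shape; here the last two dimensions of a (20, 5, 10) input.
  torch::nn::LayerNorm layer_norm(
      torch::nn::LayerNormOptions({5, 10}).eps(1e-5));
  auto x = torch::randn({20, 5, 10});
  std::cout << layer_norm(x).sizes() << '\n'; // same shape: [20, 5, 10]

  // GroupNorm splits the channel dimension into num_groups groups and
  // normalizes within each group; here 6 channels in 3 groups.
  torch::nn::GroupNorm group_norm(torch::nn::GroupNormOptions(3, 6));
  std::cout << group_norm(torch::randn({2, 6, 4, 4})).sizes() << '\n';

  // LocalResponseNorm normalizes each element across a window of
  // neighboring channels (window size 2 here).
  torch::nn::LocalResponseNorm lrn(torch::nn::LocalResponseNormOptions(2));
  std::cout << lrn(torch::randn({2, 4, 8, 8})).sizes() << '\n';
  return 0;
}
```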
======================================================================================================================================================= SOURCE CODE FILE: padding.h LINES: 1 SIZE: 14.38 KB PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\padding.h ENCODING: utf-8
```h
#pragma once

#include <torch/expanding_array.h>
#include <torch/nn/cloneable.h>
#include <torch/nn/functional/padding.h>

#include <torch/csrc/Export.h>

namespace torch::nn {

/// Base class for all (dimension-specialized) ReflectionPad modules.
template <size_t D, typename Derived>
class TORCH_API ReflectionPadImpl : public torch::nn::Cloneable<Derived> {
 public:
  ReflectionPadImpl(ExpandingArray<D * 2> padding)
      : ReflectionPadImpl(ReflectionPadOptions<D>(padding)) {}
  explicit ReflectionPadImpl(const ReflectionPadOptions<D>& options_);

  void reset() override;

  Tensor forward(const Tensor& input);

  /// Pretty prints the `ReflectionPad{1,2,3}d` module into the given `stream`.
  void pretty_print(std::ostream& stream) const override;

  /// The options with which this `Module` was constructed.
  ReflectionPadOptions<D> options;
};

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReflectionPad1d
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies ReflectionPad over a 1-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.ReflectionPad1d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::ReflectionPad1dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// ReflectionPad1d model(ReflectionPad1dOptions({3, 1}));
/// ```
class TORCH_API ReflectionPad1dImpl
    : public ReflectionPadImpl<1, ReflectionPad1dImpl> {
 public:
  using ReflectionPadImpl<1, ReflectionPad1dImpl>::ReflectionPadImpl;
};

/// A `ModuleHolder` subclass for `ReflectionPad1dImpl`.
/// See the documentation for `ReflectionPad1dImpl` class to learn what methods
/// it provides, and examples of how to use `ReflectionPad1d` with
/// `torch::nn::ReflectionPad1dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(ReflectionPad1d);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReflectionPad2d
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies ReflectionPad over a 2-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.ReflectionPad2d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::ReflectionPad2dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0}));
/// ```
class TORCH_API ReflectionPad2dImpl
    : public ReflectionPadImpl<2, ReflectionPad2dImpl> {
 public:
  using ReflectionPadImpl<2, ReflectionPad2dImpl>::ReflectionPadImpl;
};

/// A `ModuleHolder` subclass for `ReflectionPad2dImpl`.
/// See the documentation for `ReflectionPad2dImpl` class to learn what methods
/// it provides, and examples of how to use `ReflectionPad2d` with
/// `torch::nn::ReflectionPad2dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(ReflectionPad2d);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReflectionPad3d
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies ReflectionPad over a 3-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.ReflectionPad3d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::ReflectionPad3dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// ReflectionPad3d model(ReflectionPad3dOptions(1));
/// ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 2}));
/// ```
class TORCH_API ReflectionPad3dImpl
    : public ReflectionPadImpl<3, ReflectionPad3dImpl> {
 public:
  using ReflectionPadImpl<3, ReflectionPad3dImpl>::ReflectionPadImpl;
};

/// A `ModuleHolder` subclass for `ReflectionPad3dImpl`.
/// See the documentation for `ReflectionPad3dImpl` class to learn what methods
/// it provides, and examples of how to use `ReflectionPad3d` with
/// `torch::nn::ReflectionPad3dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(ReflectionPad3d);

// ============================================================================

/// Base class for all (dimension-specialized) ReplicationPad modules.
template <size_t D, typename Derived>
class TORCH_API ReplicationPadImpl : public torch::nn::Cloneable<Derived> {
 public:
  ReplicationPadImpl(ExpandingArray<D * 2> padding)
      : ReplicationPadImpl(ReplicationPadOptions<D>(padding)) {}
  explicit ReplicationPadImpl(const ReplicationPadOptions<D>& options_);

  void reset() override;

  Tensor forward(const Tensor& input);

  /// Pretty prints the `ReplicationPad{1,2,3}d` module into the given
  /// `stream`.
  void pretty_print(std::ostream& stream) const override;

  /// The options with which this `Module` was constructed.
  ReplicationPadOptions<D> options;
};

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReplicationPad1d
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies ReplicationPad over a 1-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.ReplicationPad1d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::ReplicationPad1dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// ReplicationPad1d model(ReplicationPad1dOptions({3, 1}));
/// ```
class TORCH_API ReplicationPad1dImpl
    : public ReplicationPadImpl<1, ReplicationPad1dImpl> {
 public:
  using ReplicationPadImpl<1, ReplicationPad1dImpl>::ReplicationPadImpl;
};

/// A `ModuleHolder` subclass for `ReplicationPad1dImpl`.
/// See the documentation for `ReplicationPad1dImpl` class to learn what
/// methods it provides, and examples of how to use `ReplicationPad1d` with
/// `torch::nn::ReplicationPad1dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(ReplicationPad1d);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReplicationPad2d
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies ReplicationPad over a 2-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.ReplicationPad2d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::ReplicationPad2dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0}));
/// ```
class TORCH_API ReplicationPad2dImpl
    : public ReplicationPadImpl<2, ReplicationPad2dImpl> {
 public:
  using ReplicationPadImpl<2, ReplicationPad2dImpl>::ReplicationPadImpl;
};

/// A `ModuleHolder` subclass for `ReplicationPad2dImpl`.
/// See the documentation for `ReplicationPad2dImpl` class to learn what
/// methods it provides, and examples of how to use `ReplicationPad2d` with
/// `torch::nn::ReplicationPad2dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(ReplicationPad2d);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReplicationPad3d
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies ReplicationPad over a 3-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.ReplicationPad3d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::ReplicationPad3dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
/// ```
class TORCH_API ReplicationPad3dImpl
    : public ReplicationPadImpl<3, ReplicationPad3dImpl> {
 public:
  using ReplicationPadImpl<3, ReplicationPad3dImpl>::ReplicationPadImpl;
};

/// A `ModuleHolder` subclass for `ReplicationPad3dImpl`.
/// See the documentation for `ReplicationPad3dImpl` class to learn what
/// methods it provides, and examples of how to use `ReplicationPad3d` with
/// `torch::nn::ReplicationPad3dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(ReplicationPad3d);

// ============================================================================

/// Base class for all (dimension-specialized) ZeroPad modules.
template <size_t D, typename Derived>
class TORCH_API ZeroPadImpl : public torch::nn::Cloneable<Derived> {
 public:
  ZeroPadImpl(ExpandingArray<D * 2> padding)
      : ZeroPadImpl(ZeroPadOptions<D>(padding)) {}
  explicit ZeroPadImpl(const ZeroPadOptions<D>& options_);

  void reset() override;

  Tensor forward(const Tensor& input);

  /// Pretty prints the `ZeroPad{1,2,3}d` module into the given `stream`.
  void pretty_print(std::ostream& stream) const override;

  /// The options with which this `Module` was constructed.
  ZeroPadOptions<D> options;
};

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Applies ZeroPad over a 1-D input.
class TORCH_API ZeroPad1dImpl : public ZeroPadImpl<1, ZeroPad1dImpl> {
 public:
  using ZeroPadImpl<1, ZeroPad1dImpl>::ZeroPadImpl;
};

/// A `ModuleHolder` subclass for `ZeroPad1dImpl`.
/// See the documentation for `ZeroPad1dImpl` class to learn what methods it
/// provides, and examples of how to use `ZeroPad1d` with
/// `torch::nn::ZeroPad1dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(ZeroPad1d);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Applies ZeroPad over a 2-D input.
class TORCH_API ZeroPad2dImpl : public ZeroPadImpl<2, ZeroPad2dImpl> {
 public:
  using ZeroPadImpl<2, ZeroPad2dImpl>::ZeroPadImpl;
};

/// A `ModuleHolder` subclass for `ZeroPad2dImpl`.
/// See the documentation for `ZeroPad2dImpl` class to learn what methods it
/// provides, and examples of how to use `ZeroPad2d` with
/// `torch::nn::ZeroPad2dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(ZeroPad2d);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Applies ZeroPad over a 3-D input.
class TORCH_API ZeroPad3dImpl : public ZeroPadImpl<3, ZeroPad3dImpl> {
 public:
  using ZeroPadImpl<3, ZeroPad3dImpl>::ZeroPadImpl;
};

/// A `ModuleHolder` subclass for `ZeroPad3dImpl`.
/// See the documentation for `ZeroPad3dImpl` class to learn what methods it
/// provides, and examples of how to use `ZeroPad3d` with
/// `torch::nn::ZeroPad3dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(ZeroPad3d);

// ============================================================================

/// Base class for all (dimension-specialized) ConstantPad modules.
template <size_t D, typename Derived>
class TORCH_API ConstantPadImpl : public torch::nn::Cloneable<Derived> {
 public:
  ConstantPadImpl(ExpandingArray<D * 2> padding, double value)
      : ConstantPadImpl(ConstantPadOptions<D>(padding, value)) {}
  explicit ConstantPadImpl(const ConstantPadOptions<D>& options_);

  void reset() override;

  Tensor forward(const Tensor& input);

  /// Pretty prints the `ConstantPad{1,2,3}d` module into the given `stream`.
  void pretty_print(std::ostream& stream) const override;

  /// The options with which this `Module` was constructed.
  ConstantPadOptions<D> options;
};

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies ConstantPad over a 1-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.ConstantPad1d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::ConstantPad1dOptions` class to learn
/// what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5));
/// ```
class TORCH_API ConstantPad1dImpl
    : public ConstantPadImpl<1, ConstantPad1dImpl> {
 public:
  using ConstantPadImpl<1, ConstantPad1dImpl>::ConstantPadImpl;
};

/// A `ModuleHolder` subclass for `ConstantPad1dImpl`.
/// See the documentation for `ConstantPad1dImpl` class to learn what methods
/// it provides, and examples of how to use `ConstantPad1d` with
/// `torch::nn::ConstantPad1dOptions`. See the documentation for `ModuleHolder`
/// to learn about PyTorch's module storage semantics.
TORCH_MODULE(ConstantPad1d);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies ConstantPad over a 2-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.ConstantPad2d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::ConstantPad2dOptions` class to learn
/// what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5));
/// ```
class TORCH_API ConstantPad2dImpl
    : public ConstantPadImpl<2, ConstantPad2dImpl> {
 public:
  using ConstantPadImpl<2, ConstantPad2dImpl>::ConstantPadImpl;
};

/// A `ModuleHolder` subclass for `ConstantPad2dImpl`.
/// See the documentation for `ConstantPad2dImpl` class to learn what methods
/// it provides, and examples of how to use `ConstantPad2d` with
/// `torch::nn::ConstantPad2dOptions`. See the documentation for `ModuleHolder`
/// to learn about PyTorch's module storage semantics.
TORCH_MODULE(ConstantPad2d);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/// Applies ConstantPad over a 3-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.ConstantPad3d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::ConstantPad3dOptions` class to learn
/// what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5));
/// ```
class TORCH_API ConstantPad3dImpl
    : public ConstantPadImpl<3, ConstantPad3dImpl> {
 public:
  using ConstantPadImpl<3, ConstantPad3dImpl>::ConstantPadImpl;
};

/// A `ModuleHolder` subclass for `ConstantPad3dImpl`.
/// See the documentation for `ConstantPad3dImpl` class to learn what methods
/// it provides, and examples of how to use `ConstantPad3d` with
/// `torch::nn::ConstantPad3dOptions`. See the documentation for `ModuleHolder`
/// to learn about PyTorch's module storage semantics.
TORCH_MODULE(ConstantPad3d);

} // namespace torch::nn
```
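To round off the padding families declared above, here is a short editorial usage sketch (not part of the header, assuming a standard LibTorch setup). For the 2-D variants, the padding array is ordered `{left, right, top, bottom}`; the expected output shapes are noted in the comments.

```cpp
// Illustrative usage sketch for the padding modules above.
#include <torch/torch.h>
#include <iostream>

int main() {
  auto x = torch::arange(9, torch::kFloat).reshape({1, 1, 3, 3});

  // ReflectionPad2d mirrors the input at the border; each padding amount
  // must be smaller than the corresponding input dimension.
  torch::nn::ReflectionPad2d reflect(
      torch::nn::ReflectionPad2dOptions({1, 1, 2, 0}));
  std::cout << reflect(x).sizes() << '\n'; // [1, 1, 5, 5]

  // ReplicationPad2d repeats the border values outward.
  torch::nn::ReplicationPad2d replicate(
      torch::nn::ReplicationPad2dOptions({1, 1, 2, 0}));
  std::cout << replicate(x).sizes() << '\n'; // [1, 1, 5, 5]

  // ConstantPad2d fills the new elements with a fixed value (3.5 here).
  torch::nn::ConstantPad2d constant(
      torch::nn::ConstantPad2dOptions({3, 0, 2, 1}, 3.5));
  std::cout << constant(x).sizes() << '\n'; // [1, 1, 6, 6]

  // ZeroPad2d behaves like ConstantPad2d with a fill value of zero.
  torch::nn::ZeroPad2d zero(torch::nn::ZeroPad2dOptions({1, 1, 1, 1}));
  std::cout << zero(x).sizes() << '\n'; // [1, 1, 5, 5]
  return 0;
}
```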