diff --git a/ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..cecf2428daf45ef2c9d908e30f5fffc4abc56974 --- /dev/null +++ b/ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:178341ac824177eceae2cde0ed6b39d6b7ac5888b8422cb06183e4d3b875f09b +size 16778396 diff --git a/ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..3a65525caf865b9cf737484e57ca2f96ca6d7609 --- /dev/null +++ b/ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cafda7e1f668df1c974c23ba2e76babb4c8fb21858726e99a6486cf5faed8ac +size 33555533 diff --git a/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..dfb57af8e0562997a0c627739958482483bdb265 --- /dev/null +++ b/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:552aa8ecaf09955656d67ebcbbf5a85b3f0ec86a736176c6ff1e072f5e84d7a8 +size 33555627 diff --git a/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..73af6d2cad910ba6081183ac0e2b4cd2748176dc --- /dev/null +++ b/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f9b340505ee4aaf7d2fcd6a8d81f8888a21c7ba3725c432b4377c0af8b5847d +size 33555533 diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader.h new file mode 100644 index 0000000000000000000000000000000000000000..06ea83d8a2327c13aafbe0aa35cd44de81843c07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +#include + +#include + +#include +#include +#include +#include + +namespace torch { +namespace data { + +/// Creates a `DataLoader` instance for a stateless `dataset`, a `sampler` and +/// some `options`. +template +torch::disable_if_t< + Dataset::is_stateful, + std::unique_ptr>> +make_data_loader(Dataset dataset, Sampler sampler, DataLoaderOptions options) { + return std::make_unique>( + std::move(dataset), std::move(sampler), std::move(options)); +} + +/// Creates a `DataLoader` instance for a stateless `dataset` and some +/// `options`. A sampler (by default a `RandomSampler`) will be constructed from +/// the size of the dataset. 
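// Example (an illustrative sketch, not part of this header; the MNIST root
// path and batch size are assumptions): building a stateless DataLoader with
// the overload below, which constructs the sampler (a RandomSampler by
// default) from `dataset.size()`:
//
//   auto dataset = torch::data::datasets::MNIST("./mnist")
//                      .map(torch::data::transforms::Stack<>());
//   auto loader = torch::data::make_data_loader(
//       std::move(dataset),
//       torch::data::DataLoaderOptions().batch_size(64));
//   for (auto& batch : *loader) {
//     // batch.data and batch.target are stacked tensors.
//   }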
+template +torch::disable_if_t< + Dataset::is_stateful || !std::is_constructible::value, + std::unique_ptr>> +make_data_loader( + Dataset dataset, + DataLoaderOptions options = DataLoaderOptions()) { + const optional size = dataset.size(); + TORCH_CHECK( + size.has_value(), + "Expected the dataset to be sized in " + "order to construct the Sampler"); + return make_data_loader( + std::move(dataset), Sampler(*size), std::move(options)); +} + +/// Creates a `DataLoader` for a stateful `dataset` and some `options`. +template > +std::unique_ptr> make_data_loader( + Dataset dataset, + DataLoaderOptions options = DataLoaderOptions()) { + return std::make_unique>( + std::move(dataset), std::move(options)); +} +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/base.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/base.h new file mode 100644 index 0000000000000000000000000000000000000000..72c4f337fbf4b6cd79c7e08297d7da8683b65e68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/base.h @@ -0,0 +1,255 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace data { +template +class DataLoaderBase { + public: + using BatchType = Batch; + using BatchRequestType = BatchRequest; + + /// Constructs a new DataLoader from a `dataset` to sample from, `options` + /// to configure the DataLoader with, and a `sampler` that specifies the + /// sampling strategy. + DataLoaderBase( + DataLoaderOptions options, + std::unique_ptr main_thread_dataset = nullptr) + : options_(std::move(options)), + main_thread_dataset_(std::move(main_thread_dataset)), + sequencer_(new_sequencer()) {} + + // NOLINTNEXTLINE(bugprone-exception-escape) + virtual ~DataLoaderBase() { + join(); + } + + /// Returns an iterator into the DataLoader. The lifetime of the iterator is + /// bound to the DataLoader. In C++ standards language, the category of the + /// iterator is `OutputIterator`. See + /// https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + /// means. In short: you may increment the iterator and dereference it, but + /// cannot go back, or step forward more than one position at a time. When the + /// DataLoader is exhausted, it will compare equal with the special + /// "sentinel" iterator returned by `DataLoader::end()`. Most of the time, you + /// should only use range-for loops to loop over the DataLoader, but + /// standard algorithms like `std::copy(dataloader.begin(), dataloader.end(), + /// output_iterator)` are supported too. + Iterator begin() { + TORCH_CHECK( + shuttle_.in_flight_jobs() == 0, + "Attempted to get a new DataLoader iterator " + "while another iterator is not yet exhausted"); + reset(); + return Iterator(std::make_unique>( + [this] { return this->next(); })); + } + + /// Returns a special "sentinel" iterator that compares equal with a + /// non-sentinel iterator once the DataLoader is exhausted. + Iterator end() { + return Iterator(std::make_unique>()); + } + + /// Joins the DataLoader's worker threads and drains internal queues. + /// This function may only be invoked from the main thread (in which the + /// DataLoader lives). 
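// Illustrative note (not part of the header): the iterator from begin() is
// single-pass, and the TORCH_CHECK above rejects a new iterator while jobs
// from a previous epoch are still in flight. Assuming a `loader` built with
// make_data_loader:
//
//   for (auto& batch : *loader) { /* epoch 1 */ }
//   for (auto& batch : *loader) { /* epoch 2; begin() calls reset() */ }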
+ void join() { + if (joined_) { + return; + } + shuttle_.drain(); + // Send one 'quit' message per worker. Since a worker dies (exits its + // thread) after receiving this message, each `QuitWorker()` message will be + // read by exactly one worker. + for (const auto w : c10::irange(options_.workers)) { + (void)w; // Suppress unused variable warning + push_job(QuitWorker()); + } + for (auto& worker : workers_) { + worker.join(); + } + joined_ = true; + } + + /// Returns the options with which the DataLoader was configured. + const FullDataLoaderOptions& options() const noexcept { + return options_; + } + + protected: + /// Simple mix-in to give something a sequence number. + struct Sequenced { + Sequenced() = default; + Sequenced(size_t sqn) : sequence_number(sqn) {} + size_t sequence_number; + }; + + struct QuitWorker {}; + + /// A `Job` is either a `BatchRequest` (new indices to fetch data at) or a + /// `QuitWorker` object, to indicate the worker should shut down. + struct Job : Sequenced { + Job() = default; + Job(QuitWorker q, size_t sqn) : Sequenced(sqn), quit(q) {} + Job(BatchRequest&& i, size_t sqn) + : Sequenced(sqn), batch_request(std::move(i)) {} + optional quit; + optional batch_request; + }; + + /// The finished result of a job. + struct Result : Sequenced { + Result() = default; + Result(optional&& b, size_t sqn) + : Sequenced(sqn), batch(std::move(b)) {} + Result(std::exception_ptr exception, size_t sqn) + : Sequenced(sqn), exception(std::move(exception)) {} + optional batch; + std::exception_ptr exception; + }; + + /// Subclass hook for getting the next batch request. The stateless case will + /// ask the sampler for a new batch request (e.g. a vector of indices), while + /// the stateful one will simply return the batch size. + virtual optional get_batch_request() = 0; + + /// Resets the internal state of the DataLoader, optionally pre-fetching + /// new jobs. + virtual void reset() { + shuttle_.drain(); + sequence_number_ = 0; + sequencer_ = new_sequencer(); + prefetch(); + } + + /// Schedules `requested_jobs` many new batches to be fetched. The actual + /// number of jobs scheduled may be less if the DataLoader exhausts. + void prefetch(size_t requested_jobs) { + for (const auto r : c10::irange(requested_jobs)) { + (void)r; // Suppress unused variable + if (auto batch_request = get_batch_request()) { + this->push_job(std::move(*batch_request)); + } else { + break; + } + } + } + + /// Schedules the maximum number of jobs (based on the `max_jobs` option). + void prefetch() { + prefetch(options_.max_jobs); + } + + /// Returns the next batch of data, or an empty `optional` if the DataLoader + /// is exhausted. This operation will block until a batch is available if one + /// is still expected. + optional next() { + if (options_.workers > 0) { + while (optional result = this->pop_result()) { + if (result->exception) { + throw WorkerException(result->exception); + } else if (result->batch) { + prefetch(1); + return std::move(result->batch); + } + } + } else if (auto batch_request = get_batch_request()) { + return this->main_thread_dataset_->get_batch(std::move(*batch_request)); + } + return nullopt; + } + + /// The function that worker threads run. + void worker_thread(Dataset& dataset) { + while (true) { + auto job = shuttle_.pop_job(); + if (job.quit) { + break; + } + try { + auto batch = dataset.get_batch(std::move(*job.batch_request)); + shuttle_.push_result({std::move(batch), job.sequence_number}); + } catch (...) 
{ + shuttle_.push_result({std::current_exception(), job.sequence_number}); + } + } + } + + /// Convenience method that calls `shuttle_.push_job()` with the next sequence + /// number. + template <typename T> + void push_job(T value) { + shuttle_.push_job({std::move(value), sequence_number_++}); + } + + /// Convenience method that gets the next result from the sequencer. + optional<Result> pop_result() { + return sequencer_->next( + [this] { return this->shuttle_.pop_result(this->options_.timeout); }); + } + + /// Convenience method that creates a new sequencer based on the + /// `enforce_ordering` option. + std::unique_ptr<detail::sequencers::Sequencer<Result>> new_sequencer() { + if (options_.enforce_ordering) { + return std::make_unique<detail::sequencers::OrderedSequencer<Result>>( + options_.max_jobs); + } + return std::make_unique<detail::sequencers::NoSequencer<Result>>(); + } + + /// The options the DataLoader was configured with. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const FullDataLoaderOptions options_; + + /// The dataset for the main thread, only has a value if the number of + /// worker threads was configured as zero, meaning the main thread has to do + /// all the work (synchronously). NOTE: Really want this to be on the heap + /// when empty, therefore `unique_ptr` and not `optional`. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::unique_ptr<Dataset> main_thread_dataset_; + + /// The sequence number for the *next* batch to be retrieved from the + /// dataset. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + size_t sequence_number_ = 0; + + /// The worker threads, running the `worker_thread()` method. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector<std::thread> workers_; + + /// The `DataShuttle` which takes care of the life cycle of a job. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + detail::DataShuttle<Job, Result> shuttle_; + + /// The `Sequencer`, which handles optional ordering of batches. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::unique_ptr<detail::sequencers::Sequencer<Result>> sequencer_; + + /// True if the DataLoader has joined its worker threads. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool joined_ = false; +}; +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateful.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateful.h new file mode 100644 index 0000000000000000000000000000000000000000..e8eb85861f77f97558c3ac54fed9d15b281f73a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateful.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace torch { +namespace data { + +/// A dataloader for stateful datasets. +/// +/// A dataloader for stateful datasets differs from one for stateless +/// datasets in that the dataset is shared among worker threads, and that +/// this dataset is itself responsible for producing batches rather than +/// depending on a sampler. The statefulness here actually refers to the +/// dataset. The StatefulDataLoader simply alters the data loading algorithm to +/// accommodate the stateful, shared nature of the dataset. Note that the +/// dataset must be thread safe if more than one worker thread is used. +/// +/// A stateful dataloader is created by calling `make_data_loader` with a +/// stateful dataset.
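// Hedged sketch (`ChunkReader` is a hypothetical type, not part of libtorch):
// a stateful dataset produces batches itself, so make_data_loader dispatches
// to this loader when Dataset::is_stateful holds and all workers share one
// instance.
//
//   struct ChunkReader
//       : torch::data::datasets::StatefulDataset<ChunkReader> {
//     torch::optional<std::vector<torch::data::Example<>>> get_batch(
//         size_t batch_size) override;  // empty optional ends the epoch
//     torch::optional<size_t> size() const override;
//     void reset() override;  // called at the start of every epoch
//     void save(torch::serialize::OutputArchive& archive) const override;
//     void load(torch::serialize::InputArchive& archive) override;
//   };
//
//   auto loader = torch::data::make_data_loader(
//       ChunkReader{}, torch::data::DataLoaderOptions().workers(2));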
+template <typename Dataset> +class StatefulDataLoader : public DataLoaderBase< + Dataset, + typename Dataset::BatchType::value_type, + typename Dataset::BatchRequestType> { + public: + using super = DataLoaderBase< + Dataset, + typename Dataset::BatchType::value_type, + typename Dataset::BatchRequestType>; + using typename super::BatchRequestType; + + /// Constructs the `StatefulDataLoader` from a `dataset` and some `options`. + StatefulDataLoader(Dataset dataset, DataLoaderOptions options) + : super( + std::move(options), + std::make_unique<Dataset>(std::move(dataset))) { + for (const auto w : c10::irange(this->options_.workers)) { + // As opposed to the stateless case, here all worker threads access the + // same underlying dataset. + this->workers_.emplace_back( + [this] { this->worker_thread(*this->main_thread_dataset_); }); + } + } + + private: + /// Resets the internal state of the dataloader and the dataset. + void reset() override { + this->main_thread_dataset_->reset(); + // Call the base class method last because it calls `prefetch()` + super::reset(); + } + + /// For stateful datasets, the batch request is always the batch size. The + /// dataset is responsible for determining what goes into the batch next. + optional<BatchRequestType> get_batch_request() override { + return this->options_.batch_size; + } +}; +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateless.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateless.h new file mode 100644 index 0000000000000000000000000000000000000000..9c2612bb86c36503688fc2f979572593506b46d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateless.h @@ -0,0 +1,82 @@ +#pragma once + +#include +#include + +#include +#include + +#include +#include +#include + +namespace torch { +namespace data { + +/// A dataloader for stateless datasets. +/// +/// This dataloader follows the traditional PyTorch dataloader design, whereby a +/// (possibly) stateful sampler produces *batch requests* for a stateless +/// dataset, which acts as a simple mapping from batch requests to batches. The batch +/// request will often be an array of indices, and if the dataset is a simple +/// image dataset, the dataset would produce the images at those indices. +template <typename Dataset, typename Sampler> +class StatelessDataLoader : public DataLoaderBase< + Dataset, + typename Dataset::BatchType, + typename Sampler::BatchRequestType> { + public: + using super = DataLoaderBase< + Dataset, + typename Dataset::BatchType, + typename Sampler::BatchRequestType>; + using typename super::BatchRequestType; + + /// Constructs the `StatelessDataLoader` from a `dataset`, a `sampler` and + /// some `options`. + StatelessDataLoader( + Dataset dataset, + Sampler sampler, + DataLoaderOptions options) + : super(std::move(options)), sampler_(std::move(sampler)) { + for (const auto w : c10::irange(this->options_.workers)) { + // Here we copy the dataset into the worker thread closure. Each worker + // has its own copy of the dataset. This means the dataset must be + // trivially copyable, or else we don't expect more than one worker to + // be in use.
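// Illustrative note (option values are assumptions): each worker thread
// captures its own copy of `dataset` below, while workers(0) skips thread
// creation entirely and serves batches synchronously from the main thread:
//
//   auto loader = torch::data::make_data_loader(
//       std::move(dataset),
//       torch::data::DataLoaderOptions().batch_size(32).workers(4));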
+ (void)w; // Suppress unused variable warning + this->workers_.emplace_back( + [this, dataset]() mutable { this->worker_thread(dataset); }); + } + if (this->options_.workers == 0) { + this->main_thread_dataset_ = + std::make_unique(std::move(dataset)); + } + } + + private: + /// Resets the internal state of the dataloader and the sampler. + void reset() override { + sampler_.reset(); + // Call the base class method last because it calls `prefetch()` + super::reset(); + } + + /// Queries the sampler for the next batch request (possibly progressing its + /// internal state). + optional get_batch_request() override { + auto indices = sampler_.next(this->options_.batch_size); + if (!indices || + (indices->size() < this->options_.batch_size && + this->options_.drop_last)) { + return nullopt; + } + AT_ASSERT(indices->size() > 0); + return indices; + } + + /// The `Sampler` used to produce batch requests. + Sampler sampler_; +}; +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets.h new file mode 100644 index 0000000000000000000000000000000000000000..df565e97235828e5c89c76f0373bc1cdaee01287 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets.h @@ -0,0 +1,9 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/map.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/map.h new file mode 100644 index 0000000000000000000000000000000000000000..7b8b8febd222aca46e1c90800dc16c71780b4718 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/map.h @@ -0,0 +1,118 @@ +#pragma once + +#include +#include + +#include + +#include +#include +#include + +namespace torch { +namespace data { +namespace datasets { +namespace detail { +template +using optional_if_t = typename std::conditional, T>::type; +} // namespace detail + +/// A `MapDataset` is a dataset that applies a transform to a source dataset. +template +class MapDataset : public BatchDataset< + MapDataset, + detail::optional_if_t< + SourceDataset::is_stateful, + typename AppliedTransform::OutputBatchType>, + typename SourceDataset::BatchRequestType> { + public: + using DatasetType = SourceDataset; + using TransformType = AppliedTransform; + using BatchRequestType = typename SourceDataset::BatchRequestType; + using OutputBatchType = detail::optional_if_t< + SourceDataset::is_stateful, + typename AppliedTransform::OutputBatchType>; + + MapDataset(DatasetType dataset, TransformType transform) + : dataset_(std::move(dataset)), transform_(std::move(transform)) {} + + /// Gets a batch from the source dataset and applies the transform to it, + /// returning the result. + OutputBatchType get_batch(BatchRequestType indices) override { + return get_batch_impl(std::move(indices)); + } + + /// Returns the size of the source dataset. + // NOLINTNEXTLINE(bugprone-exception-escape) + optional size() const noexcept override { + return dataset_.size(); + } + + /// Calls `reset()` on the underlying dataset. + /// NOTE: Stateless datasets do not have a reset() method, so a call to this + /// method will only compile for stateful datasets (which have a reset() + /// method). 
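// Example (a sketch; the normalization constants are the usual MNIST values,
// used here as an assumption): transforms compose by nesting MapDatasets, so
// each `.map()` wraps the previous dataset:
//
//   auto mapped = torch::data::datasets::MNIST("./mnist")
//                     .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
//                     .map(torch::data::transforms::Stack<>());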
+ void reset() { + dataset_.reset(); + } + + /// Returns the underlying dataset. + const SourceDataset& dataset() noexcept { + return dataset_; + } + + /// Returns the transform being applied. + const AppliedTransform& transform() noexcept { + return transform_; + } + + private: + /// The implementation of `get_batch()` for the stateless case, which simply + /// applies the transform to the output of `get_batch()` from the dataset. + template < + typename D = SourceDataset, + typename = torch::disable_if_t> + OutputBatchType get_batch_impl(BatchRequestType indices) { + return transform_.apply_batch(dataset_.get_batch(std::move(indices))); + } + + /// The implementation of `get_batch()` for the stateful case. Here, we follow + /// the semantics of `Optional.map()` in many functional languages, which + /// applies a transformation to the optional's content when the optional + /// contains a value, and returns a new optional (of a different type) if the + /// original optional returned by `get_batch()` was empty. + template + torch::enable_if_t get_batch_impl( + BatchRequestType indices) { + if (auto batch = dataset_.get_batch(std::move(indices))) { + return transform_.apply_batch(std::move(*batch)); + } + return nullopt; + } + + /// The underlying dataset being transformed. + SourceDataset dataset_; + + // The transformation that is applied to batches received from the dataset. + AppliedTransform transform_; +}; + +/// Creates a `MapDataset` with the given dataset and transform. +template +MapDataset map( + DatasetType dataset, + TransformType transform) { + static_assert( + std::is_same< + typename std::conditional< + DatasetType::is_stateful, + typename DatasetType::BatchType::value_type, + typename DatasetType::BatchType>::type, + typename TransformType::InputBatchType>::value, + "BatchType type of dataset does not match input type of transform"); + return {std::move(dataset), std::move(transform)}; +} + +} // namespace datasets +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/mnist.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/mnist.h new file mode 100644 index 0000000000000000000000000000000000000000..3187eae9da176a50a866cdf7fd0d3507b94a646b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/mnist.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include + +#include + +#include +#include + +namespace torch { +namespace data { +namespace datasets { +/// The MNIST dataset. +class TORCH_API MNIST : public Dataset { + public: + /// The mode in which the dataset is loaded. + enum class Mode { kTrain, kTest }; + + /// Loads the MNIST dataset from the `root` path. + /// + /// The supplied `root` path should contain the *content* of the unzipped + /// MNIST dataset, available from http://yann.lecun.com/exdb/mnist. + explicit MNIST(const std::string& root, Mode mode = Mode::kTrain); + + /// Returns the `Example` at the given `index`. + Example<> get(size_t index) override; + + /// Returns the size of the dataset. + optional size() const override; + + /// Returns true if this is the training subset of MNIST. + // NOLINTNEXTLINE(bugprone-exception-escape) + bool is_train() const noexcept; + + /// Returns all images stacked into a single tensor. + const Tensor& images() const; + + /// Returns all targets stacked into a single tensor. 
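// Example (a sketch; the root path is an assumption): fetching one example
// and the pre-stacked tensors:
//
//   torch::data::datasets::MNIST mnist(
//       "./mnist", torch::data::datasets::MNIST::Mode::kTest);
//   auto example = mnist.get(0);          // Example<> with .data and .target
//   const auto& images = mnist.images();  // shape [N, 1, 28, 28]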
+ const Tensor& targets() const; + + private: + Tensor images_, targets_; +}; +} // namespace datasets +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/shared.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/shared.h new file mode 100644 index 0000000000000000000000000000000000000000..097214746d0019479fdedc5e0bb19c0dfbec7933 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/shared.h @@ -0,0 +1,83 @@ +#pragma once + +#include + +#include +#include + +namespace torch { +namespace data { +namespace datasets { + +/// A dataset that wraps another dataset in a shared pointer and implements the +/// `BatchDataset` API, delegating all calls to the shared instance. This is +/// useful when you want all worker threads in the dataloader to access the same +/// dataset instance. The dataset must take care of synchronization and +/// thread-safe access itself. +/// +/// Use `torch::data::datasets::make_shared_dataset()` to create a new +/// `SharedBatchDataset` like you would a `std::shared_ptr`. +template +class SharedBatchDataset : public BatchDataset< + SharedBatchDataset, + typename UnderlyingDataset::BatchType, + typename UnderlyingDataset::BatchRequestType> { + public: + using BatchType = typename UnderlyingDataset::BatchType; + using BatchRequestType = typename UnderlyingDataset::BatchRequestType; + + /// Constructs a new `SharedBatchDataset` from a `shared_ptr` to the + /// `UnderlyingDataset`. + /* implicit */ SharedBatchDataset( + std::shared_ptr shared_dataset) + : dataset_(std::move(shared_dataset)) {} + + /// Calls `get_batch` on the underlying dataset. + BatchType get_batch(BatchRequestType request) override { + return dataset_->get_batch(std::move(request)); + } + + /// Returns the `size` from the underlying dataset. + optional size() const override { + return dataset_->size(); + } + + /// Accesses the underlying dataset. + UnderlyingDataset& operator*() { + return *dataset_; + } + + /// Accesses the underlying dataset. + const UnderlyingDataset& operator*() const { + return *dataset_; + } + + /// Accesses the underlying dataset. + UnderlyingDataset* operator->() { + return dataset_.get(); + } + + /// Accesses the underlying dataset. + const UnderlyingDataset* operator->() const { + return dataset_.get(); + } + + /// Calls `reset()` on the underlying dataset. + void reset() { + dataset_->reset(); + } + + private: + std::shared_ptr dataset_; +}; + +/// Constructs a new `SharedBatchDataset` by creating a +/// `shared_ptr`. All arguments are forwarded to +/// `make_shared`. +template +SharedBatchDataset make_shared_dataset(Args&&... 
args) { + return std::make_shared<UnderlyingDataset>(std::forward<Args>(args)...); +} +} // namespace datasets +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/stateful.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/stateful.h new file mode 100644 index 0000000000000000000000000000000000000000..0b1518b27da39d6d2aa1dbb06bf1004fb16de0d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/stateful.h @@ -0,0 +1,70 @@ +#pragma once + +#include +#include + +#include +#include + +namespace torch { +namespace serialize { +class OutputArchive; +class InputArchive; +} // namespace serialize +} // namespace torch + +namespace torch { +namespace data { +namespace datasets { + +/// A stateful dataset is a dataset that maintains some internal state, which +/// will be `reset()` at the beginning of each epoch. Subclasses can override +/// the `reset()` method to configure this behavior. Further, the return type of +/// a stateful dataset's `get_batch()` method is always an `optional`. When the +/// stateful dataset wants to indicate to the dataloader that its epoch has +/// ended, it should return an empty optional. The dataloader knows to modify +/// its implementation based on whether the dataset is stateless or stateful. +/// +/// Note that when subclassing from `StatefulDataset`, the return +/// type of `get_batch()`, which the subclass must override, will be +/// `optional<BatchType>` (i.e. the type specified in the `StatefulDataset` +/// specialization is automatically boxed into an `optional` for the dataset's +/// `BatchType`). +template < + typename Self, + typename Batch = std::vector<Example<>>, + typename BatchRequest = size_t> +class StatefulDataset + : public BatchDataset<Self, optional<Batch>, BatchRequest> { + public: + /// Resets internal state of the dataset. + virtual void reset() = 0; + + /// Saves the statefulDataset's state to OutputArchive. + virtual void save(serialize::OutputArchive& archive) const = 0; + + /// Deserializes the statefulDataset's state from the `archive`. + virtual void load(serialize::InputArchive& archive) = 0; +}; + +/// Serializes a statefulDataset to `OutputArchive`. +template <typename BatchType> +serialize::OutputArchive& operator<<( + serialize::OutputArchive& archive, + const StatefulDataset<BatchType>& statefulDataset) { + statefulDataset.save(archive); + return archive; +} + +/// Deserializes a statefulDataset from an `InputArchive`.
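// Example (a sketch; `reader` stands for any concrete StatefulDataset and the
// file name is an assumption): checkpointing dataset state through these
// stream operators:
//
//   torch::serialize::OutputArchive out;
//   out << reader;  // dispatches to reader.save(out)
//   out.save_to("reader_state.pt");
//
//   torch::serialize::InputArchive in;
//   in.load_from("reader_state.pt");
//   in >> reader;  // dispatches to reader.load(in)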
+template <typename BatchType> +serialize::InputArchive& operator>>( + serialize::InputArchive& archive, + StatefulDataset<BatchType>& statefulDataset) { + statefulDataset.load(archive); + return archive; +} + +} // namespace datasets +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/base.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/base.h new file mode 100644 index 0000000000000000000000000000000000000000..780f19035c6a3dd0b48ea358b32ec3ead2996843 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/base.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace torch { +namespace serialize { +class OutputArchive; +class InputArchive; +} // namespace serialize +} // namespace torch + +namespace torch { +namespace data { +namespace samplers { +/// A `Sampler` is an object that yields an index with which to access a +/// dataset. +template <typename BatchRequest = std::vector<size_t>> +class Sampler { + public: + using BatchRequestType = BatchRequest; + + virtual ~Sampler() = default; + + /// Resets the `Sampler`'s internal state. + /// Typically called before a new epoch. + /// Optionally, accepts a new size when resetting the sampler. + virtual void reset(optional<size_t> new_size) = 0; + + /// Returns the next index if possible, or an empty optional if the + /// sampler is exhausted for this epoch. + virtual optional<BatchRequest> next(size_t batch_size) = 0; + + /// Serializes the `Sampler` to the `archive`. + virtual void save(serialize::OutputArchive& archive) const = 0; + + /// Deserializes the `Sampler` from the `archive`. + virtual void load(serialize::InputArchive& archive) = 0; +}; + +} // namespace samplers +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/custom_batch_request.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/custom_batch_request.h new file mode 100644 index 0000000000000000000000000000000000000000..a5247b008d75021c3627d1a3bc922072c96f7812 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/custom_batch_request.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include + +namespace torch { +namespace data { +namespace samplers { +/// A base class for custom index types. +struct TORCH_API CustomBatchRequest { + CustomBatchRequest() = default; + CustomBatchRequest(const CustomBatchRequest&) = default; + CustomBatchRequest(CustomBatchRequest&&) noexcept = default; + virtual ~CustomBatchRequest() = default; + + /// The number of elements accessed by this index.
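// Hedged sketch (`RangeRequest` is a hypothetical type, not part of
// libtorch): a custom index describing a half-open range of examples:
//
//   struct RangeRequest : torch::data::samplers::CustomBatchRequest {
//     size_t begin = 0;
//     size_t end = 0;
//     size_t size() const override { return end - begin; }
//   };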
+ virtual size_t size() const = 0; +}; +} // namespace samplers +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/distributed.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/distributed.h new file mode 100644 index 0000000000000000000000000000000000000000..82eed0913100d1579a4375668a45a9979fcc2c6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/distributed.h @@ -0,0 +1,139 @@ +#pragma once + +#include +#include + +#include +#include + +namespace torch { +namespace serialize { +class OutputArchive; +class InputArchive; +} // namespace serialize +} // namespace torch + +namespace torch { +namespace data { +namespace samplers { + +/// A `Sampler` that selects a subset of indices to sample from and defines a +/// sampling behavior. In a distributed setting, this selects a subset of the +/// indices depending on the provided num_replicas and rank parameters. The +/// `Sampler` performs a rounding operation based on the `allow_duplicates` +/// parameter to decide the local sample count. +template > +class DistributedSampler : public Sampler { + public: + DistributedSampler( + size_t size, + size_t num_replicas = 1, + size_t rank = 0, + bool allow_duplicates = true) + : size_(size), + num_replicas_(num_replicas), + rank_(rank), + epoch_(0), + allow_duplicates_(allow_duplicates) {} + + /// Set the epoch for the current enumeration. This can be used to alter the + /// sample selection and shuffling behavior. + void set_epoch(size_t epoch) { + epoch_ = epoch; + } + + size_t epoch() const { + return epoch_; + } + + protected: + size_t local_sample_count() { + if (allow_duplicates_) { + return (size_ + num_replicas_ - 1) / num_replicas_; + } else { + return size_ / num_replicas_; + } + } + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + size_t size_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + size_t num_replicas_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + size_t rank_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + size_t epoch_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool allow_duplicates_; +}; + +/// Select samples randomly. The sampling order is shuffled at each `reset()` +/// call. +class TORCH_API DistributedRandomSampler : public DistributedSampler<> { + public: + DistributedRandomSampler( + size_t size, + size_t num_replicas = 1, + size_t rank = 0, + bool allow_duplicates = true); + + /// Resets the `DistributedRandomSampler` to a new set of indices. + void reset(optional new_size = nullopt) override; + + /// Returns the next batch of indices. + optional> next(size_t batch_size) override; + + /// Serializes the `DistributedRandomSampler` to the `archive`. + void save(serialize::OutputArchive& archive) const override; + + /// Deserializes the `DistributedRandomSampler` from the `archive`. + void load(serialize::InputArchive& archive) override; + + /// Returns the current index of the `DistributedRandomSampler`. + size_t index() const noexcept; + + private: + void populate_indices(); + + size_t begin_index_; + size_t end_index_; + size_t sample_index_; + std::vector all_indices_; +}; + +/// Select samples sequentially. 
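// Example (a sketch; the sizes are assumptions) for the distributed samplers
// declared here: with allow_duplicates (the default), each of 4 replicas
// draws ceil(10000 / 4) = 2500 of the indices:
//
//   torch::data::samplers::DistributedRandomSampler sampler(
//       /*size=*/10000, /*num_replicas=*/4, /*rank=*/0);
//   sampler.set_epoch(3);  // vary the shuffle from epoch to epoch
//   sampler.reset();
//   auto indices = sampler.next(/*batch_size=*/32);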
+class TORCH_API DistributedSequentialSampler : public DistributedSampler<> { + public: + DistributedSequentialSampler( + size_t size, + size_t num_replicas = 1, + size_t rank = 0, + bool allow_duplicates = true); + + /// Resets the `DistributedSequentialSampler` to a new set of indices. + void reset(optional new_size = nullopt) override; + + /// Returns the next batch of indices. + optional> next(size_t batch_size) override; + + /// Serializes the `DistributedSequentialSampler` to the `archive`. + void save(serialize::OutputArchive& archive) const override; + + /// Deserializes the `DistributedSequentialSampler` from the `archive`. + void load(serialize::InputArchive& archive) override; + + /// Returns the current index of the `DistributedSequentialSampler`. + size_t index() const noexcept; + + private: + void populate_indices(); + + size_t begin_index_; + size_t end_index_; + size_t sample_index_; + std::vector all_indices_; +}; + +} // namespace samplers +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/random.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/random.h new file mode 100644 index 0000000000000000000000000000000000000000..a1415e5ac6587903ac0aaf0f579d6f75610f229a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/random.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace serialize { +class OutputArchive; +class InputArchive; +} // namespace serialize +} // namespace torch + +namespace torch { +namespace data { +namespace samplers { + +/// A `Sampler` that returns random indices. +class TORCH_API RandomSampler : public Sampler<> { + public: + /// Constructs a `RandomSampler` with a size and dtype for the stored indices. + /// + /// The constructor will eagerly allocate all required indices, which is the + /// sequence `0 ... size - 1`. `index_dtype` is the data type of the stored + /// indices. You can change it to influence memory usage. + explicit RandomSampler(int64_t size, Dtype index_dtype = torch::kInt64); + + ~RandomSampler() override; + + /// Resets the `RandomSampler` to a new set of indices. + void reset(optional new_size = nullopt) override; + + /// Returns the next batch of indices. + optional> next(size_t batch_size) override; + + /// Serializes the `RandomSampler` to the `archive`. + void save(serialize::OutputArchive& archive) const override; + + /// Deserializes the `RandomSampler` from the `archive`. + void load(serialize::InputArchive& archive) override; + + /// Returns the current index of the `RandomSampler`. 
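// Example (a sketch; the sizes are assumptions): next() returns at most
// `batch_size` indices and an empty optional once all `size` indices are
// consumed, so 1000 indices in batches of 256 arrive as 256, 256, 256, 232:
//
//   torch::data::samplers::RandomSampler sampler(/*size=*/1000);
//   sampler.reset();
//   while (auto batch = sampler.next(/*batch_size=*/256)) {
//     // batch->size() is 256, 256, 256, then 232
//   }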
+ size_t index() const noexcept; + + private: + at::Tensor indices_; + int64_t index_ = 0; +}; +} // namespace samplers +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/sequential.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/sequential.h new file mode 100644 index 0000000000000000000000000000000000000000..711d8421b230455d3afb27fecafb18c19014ce1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/sequential.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace serialize { +class OutputArchive; +class InputArchive; +} // namespace serialize +} // namespace torch + +namespace torch { +namespace data { +namespace samplers { + +/// A `Sampler` that returns indices sequentially. +class TORCH_API SequentialSampler : public Sampler<> { + public: + /// Creates a `SequentialSampler` that will return indices in the range + /// `0...size - 1`. + explicit SequentialSampler(size_t size); + + /// Resets the `SequentialSampler` to zero. + void reset(optional new_size = nullopt) override; + + /// Returns the next batch of indices. + optional> next(size_t batch_size) override; + + /// Serializes the `SequentialSampler` to the `archive`. + void save(serialize::OutputArchive& archive) const override; + + /// Deserializes the `SequentialSampler` from the `archive`. + void load(serialize::InputArchive& archive) override; + + /// Returns the current index of the `SequentialSampler`. + size_t index() const noexcept; + + private: + size_t size_; + size_t index_{0}; +}; + +} // namespace samplers +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/serialize.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/serialize.h new file mode 100644 index 0000000000000000000000000000000000000000..7585217a9cf260a67eb4d4fbda061a27a3fb23af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/serialize.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include + +namespace torch { +namespace data { +namespace samplers { +/// Serializes a `Sampler` into an `OutputArchive`. +template +serialize::OutputArchive& operator<<( + serialize::OutputArchive& archive, + const Sampler& sampler) { + sampler.save(archive); + return archive; +} + +/// Deserializes a `Sampler` from an `InputArchive`. 
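// Example (a sketch; `sampler` and the file name are assumptions): these
// operators allow checkpointing a sampler mid-epoch so an interrupted run can
// resume at the same position:
//
//   torch::serialize::OutputArchive archive;
//   archive << sampler;  // dispatches to sampler.save(archive)
//   archive.save_to("sampler.pt");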
+template +serialize::InputArchive& operator>>( + serialize::InputArchive& archive, + Sampler& sampler) { + sampler.load(archive); + return archive; +} +} // namespace samplers +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/stream.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/stream.h new file mode 100644 index 0000000000000000000000000000000000000000..8d30c9e2e0d08fcdff31cbb73b10899d1f1ca8db --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/stream.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch { +namespace serialize { +class InputArchive; +class OutputArchive; +} // namespace serialize +} // namespace torch + +namespace torch { +namespace data { +namespace samplers { + +/// A wrapper around a batch size value, which implements the +/// `CustomBatchRequest` interface. +struct TORCH_API BatchSize : public CustomBatchRequest { + explicit BatchSize(size_t size); + size_t size() const noexcept override; + operator size_t() const noexcept; + size_t size_; +}; + +/// A sampler for (potentially infinite) streams of data. +/// +/// The major feature of the `StreamSampler` is that it does not return +/// particular indices, but instead only the number of elements to fetch from +/// the dataset. The dataset has to decide how to produce those elements. +class TORCH_API StreamSampler : public Sampler { + public: + /// Constructs the `StreamSampler` with the number of individual examples that + /// should be fetched until the sampler is exhausted. + explicit StreamSampler(size_t epoch_size); + + /// Resets the internal state of the sampler. + void reset(optional new_size = nullopt) override; + + /// Returns a `BatchSize` object with the number of elements to fetch in the + /// next batch. This number is the minimum of the supplied `batch_size` and + /// the difference between the `epoch_size` and the current index. If the + /// `epoch_size` has been reached, returns an empty optional. + optional next(size_t batch_size) override; + + /// Serializes the `StreamSampler` to the `archive`. + void save(serialize::OutputArchive& archive) const override; + + /// Deserializes the `StreamSampler` from the `archive`. + void load(serialize::InputArchive& archive) override; + + private: + size_t examples_retrieved_so_far_ = 0; + size_t epoch_size_; +}; + +} // namespace samplers +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/stack.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/stack.h new file mode 100644 index 0000000000000000000000000000000000000000..4be1bd920b71596b7a91dcae28acc2713c7af782 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/stack.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace data { +namespace transforms { + +template > +struct Stack; + +/// A `Collation` for `Example` types that stacks all data +/// tensors into one tensor, and all target (label) tensors into one tensor. 
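// Example (a sketch; the dataset is an assumption): after applying Stack<>,
// get_batch() yields a single Example<> whose tensors gain a leading batch
// dimension, e.g. MNIST images of shape [1, 28, 28] stack to [N, 1, 28, 28]:
//
//   auto stacked = torch::data::datasets::MNIST("./mnist")
//                      .map(torch::data::transforms::Stack<>());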
+template <> +struct Stack> : public Collation> { + Example<> apply_batch(std::vector> examples) override { + std::vector data, targets; + data.reserve(examples.size()); + targets.reserve(examples.size()); + for (auto& example : examples) { + data.push_back(std::move(example.data)); + targets.push_back(std::move(example.target)); + } + return {torch::stack(data), torch::stack(targets)}; + } +}; + +/// A `Collation` for `Example` types that stacks all data +/// tensors into one tensor. +template <> +struct Stack + : public Collation> { + TensorExample apply_batch(std::vector examples) override { + std::vector data; + data.reserve(examples.size()); + for (auto& example : examples) { + data.push_back(std::move(example.data)); + } + return torch::stack(data); + } +}; +} // namespace transforms +} // namespace data +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h new file mode 100644 index 0000000000000000000000000000000000000000..aaf30d90974b11bd97dfa7617bc78faf13ded068 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h @@ -0,0 +1,98 @@ +#pragma once + +#include +#include +#include + +#include +#include + +#include +#include + +namespace torch { +namespace nn { +/// The `clone()` method in the base `Module` class does not have knowledge of +/// the concrete runtime type of its subclasses. Therefore, `clone()` must +/// either be called from within the subclass, or from a base class that has +/// knowledge of the concrete type. `Cloneable` uses the CRTP to gain +/// knowledge of the subclass' static type and provide an implementation of the +/// `clone()` method. We do not want to use this pattern in the base class, +/// because then storing a module would always require templatizing it. +template +// NOLINTNEXTLINE(bugprone-exception-escape) +class Cloneable : public Module { + public: + using Module::Module; + + /// `reset()` must perform initialization of all members with reference + /// semantics, most importantly parameters, buffers and submodules. + virtual void reset() = 0; + + /// Performs a recursive "deep copy" of the `Module`, such that all parameters + /// and submodules in the cloned module are different from those in the + /// original module. + std::shared_ptr clone( + const optional& device = nullopt) const override { + NoGradGuard no_grad; + + const auto& self = static_cast(*this); + auto copy = std::make_shared(self); + copy->parameters_.clear(); + copy->buffers_.clear(); + copy->children_.clear(); + copy->reset(); + TORCH_CHECK( + copy->parameters_.size() == parameters_.size(), + "The cloned module does not have the same number of " + "parameters as the original module after calling reset(). " + "Are you sure you called register_parameter() inside reset() " + "and not the constructor?"); + for (const auto& parameter : named_parameters(/*recurse=*/false)) { + auto& tensor = *parameter; + auto data = device && tensor.device() != *device + ? tensor.to(*device) + : autograd::Variable(tensor).clone(); + copy->parameters_[parameter.key()].set_data(data); + } + TORCH_CHECK( + copy->buffers_.size() == buffers_.size(), + "The cloned module does not have the same number of " + "buffers as the original module after calling reset(). 
" + "Are you sure you called register_buffer() inside reset() " + "and not the constructor?"); + for (const auto& buffer : named_buffers(/*recurse=*/false)) { + auto& tensor = *buffer; + auto data = device && tensor.device() != *device + ? tensor.to(*device) + : autograd::Variable(tensor).clone(); + copy->buffers_[buffer.key()].set_data(data); + } + TORCH_CHECK( + copy->children_.size() == children_.size(), + "The cloned module does not have the same number of " + "child modules as the original module after calling reset(). " + "Are you sure you called register_module() inside reset() " + "and not the constructor?"); + for (const auto& child : children_) { + copy->children_[child.key()]->clone_(*child.value(), device); + } + return copy; + } + + private: + void clone_(Module& other, const optional& device) final { + // Here we are *pretty* certain that `other's` type is `Derived` (because it + // was registered under the same name as `this`), but you never know what + // crazy things `reset()` does, so `dynamic_cast` just to be safe. + auto clone = std::dynamic_pointer_cast(other.clone(device)); + TORCH_CHECK( + clone != nullptr, + "Attempted to clone submodule, but it is of a " + "different type than the submodule it was to be cloned into"); + static_cast(*this) = *clone; + } +}; + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..b148edc68173f4d11cf58e042902edf3c508afff --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h new file mode 100644 index 0000000000000000000000000000000000000000..52030da2aa9233fc8dbbe0b4da36afd3e939a006 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h @@ -0,0 +1,966 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor elu(Tensor input, double alpha, bool inplace) { + if (inplace) { + return torch::elu_(input, alpha); + } else { + return torch::elu(input, alpha); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.elu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::ELUFuncOptions` class to +/// learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true)); +/// ``` +inline Tensor elu(Tensor input, const ELUFuncOptions& options = {}) { + return detail::elu(std::move(input), options.alpha(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor selu(Tensor input, bool inplace) { + if (inplace) { + return torch::selu_(input); + } else { + return torch::selu(input); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.selu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SELUFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::selu(input, F::SELUFuncOptions(false)); +/// ``` +inline Tensor selu(Tensor input, const SELUFuncOptions& options = {}) { + return detail::selu(std::move(input), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor hardshrink(const Tensor& input, double lambda) { + return torch::hardshrink(input, lambda); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardshrink +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::HardshrinkFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42)); +/// ``` +inline Tensor hardshrink( + const Tensor& input, + const HardshrinkFuncOptions& options = {}) { + return detail::hardshrink(input, options.lambda()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor hardtanh( + Tensor input, + double min_val, + double max_val, + bool inplace) { + if (inplace) { + return torch::hardtanh_(input, min_val, max_val); + } else { + return torch::hardtanh(input, min_val, max_val); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardtanh +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::HardtanhFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hardtanh(x, +/// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true)); +/// ``` +inline Tensor hardtanh(Tensor input, const HardtanhFuncOptions& options = {}) { + return detail::hardtanh( + std::move(input), + options.min_val(), + options.max_val(), + options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor leaky_relu(Tensor input, double negative_slope, bool inplace) { + if (inplace) { + return torch::leaky_relu_(input, negative_slope); + } else { + return torch::leaky_relu(input, negative_slope); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.leaky_relu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LeakyReLUFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::leaky_relu(x, +/// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true)); +/// ``` +inline Tensor leaky_relu( + Tensor input, + const LeakyReLUFuncOptions& options = {}) { + return detail::leaky_relu( + std::move(input), options.negative_slope(), options.inplace()); +} + +// ============================================================================ + +inline Tensor logsigmoid(const Tensor& input) { + return torch::log_sigmoid(input); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor gumbel_softmax( + const Tensor& logits, + double tau, + bool hard, + int dim) { + auto gumbels = + -torch::empty_like(logits).exponential_().log(); // ~Gumbel(0,1) + gumbels = (logits + gumbels) / tau; // ~Gumbel(logits, tau) + auto y_soft = gumbels.softmax(dim); + + torch::Tensor ret; + if (hard) { + // Straight through. + auto index = std::get<1>(y_soft.max(dim, /*keepdim=*/true)); + auto y_hard = torch::zeros_like(logits).scatter_(dim, index, 1.0); + ret = y_hard - y_soft.detach() + y_soft; + } else { + ret = y_soft; + } + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.gumbel_softmax +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::GumbelSoftmaxFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1)); +/// ``` +inline Tensor gumbel_softmax( + const Tensor& logits, + const GumbelSoftmaxFuncOptions& options = {}) { + return detail::gumbel_softmax( + logits, options.tau(), options.hard(), options.dim()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor softmax( + const Tensor& input, + int64_t dim, + c10::optional dtype) { + Tensor ret; + + if (dtype == c10::nullopt) { + ret = input.softmax(dim); + } else { + ret = input.softmax(dim, dtype); + } + + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmax +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SoftmaxFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softmax(input, F::SoftmaxFuncOptions(1)); +/// ``` +inline Tensor softmax(const Tensor& input, const SoftmaxFuncOptions& options) { + return detail::softmax(input, options.dim(), options.dtype()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor softmin( + const Tensor& input, + int64_t dim, + c10::optional dtype) { + Tensor ret; + + if (dtype == c10::nullopt) { + ret = (-input).softmax(dim); + } else { + ret = (-input).softmax(dim, dtype); + } + + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmin +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SoftminFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softmin(input, F::SoftminFuncOptions(1)); +/// ``` +inline Tensor softmin(const Tensor& input, const SoftminFuncOptions& options) { + return detail::softmin(input, options.dim(), options.dtype()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor log_softmax( + const Tensor& input, + int64_t dim, + c10::optional dtype) { + Tensor ret; + + if (dtype == c10::nullopt) { + ret = input.log_softmax(dim); + } else { + ret = input.log_softmax(dim, dtype); + } + + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.log_softmax +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LogSoftmaxFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::log_softmax(input, F::LogSoftmaxFuncOptions(1)); +/// ``` +inline Tensor log_softmax( + const Tensor& input, + const LogSoftmaxFuncOptions& options) { + return detail::log_softmax(input, options.dim(), options.dtype()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor glu(const Tensor& input, int64_t dim) { + TORCH_CHECK( + input.dim() != 0, + "glu does not support scalars because halving size must be even"); + return torch::glu(input, dim); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.glu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::GLUFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::glu(input, F::GLUFuncOptions(1)); +/// ``` +inline Tensor glu(const Tensor& input, const GLUFuncOptions& options = {}) { + return detail::glu(input, options.dim()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor gelu(const Tensor& input, std::string approximate) { + return torch::gelu(input, approximate); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +inline Tensor gelu(const Tensor& input, const GELUFuncOptions& options = {}) { + return detail::gelu(input, options.approximate()); +} + +// ============================================================================ + +inline Tensor silu(const Tensor& input) { + return torch::silu(input); +} + +// ============================================================================ + +inline Tensor mish(const Tensor& input) { + return torch::mish(input); +} + +// ============================================================================ + +inline Tensor prelu(const Tensor& input, const Tensor& weight) { + return torch::prelu(input, weight); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor relu(Tensor input, bool inplace) { + if (inplace) { + return torch::relu_(input); + } else { + return torch::relu(input); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::ReLUFuncOptions` class to +/// learn what optional arguments are supported for this functional. 
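+/// +/// Note: as `detail::relu` above shows, `inplace(true)` dispatches to `torch::relu_` and mutates the argument (which is why `input` is taken by value and moved); with the default `inplace(false)` a new tensor is returned.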
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::relu(x, F::ReLUFuncOptions().inplace(true)); +/// ``` +inline Tensor relu(Tensor input, const ReLUFuncOptions& options = {}) { + return detail::relu(std::move(input), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor relu6(Tensor input, bool inplace) { + if (inplace) { + return torch::relu6_(input); + } else { + return torch::relu6(input); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu6 +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::ReLU6FuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::relu6(x, F::ReLU6FuncOptions().inplace(true)); +/// ``` +inline Tensor relu6(Tensor input, const ReLU6FuncOptions& options = {}) { + return detail::relu6(std::move(input), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor rrelu( + Tensor input, + double lower, + double upper, + bool training, + bool inplace) { + if (inplace) { + return torch::rrelu_(input, lower, upper, training); + } else { + return torch::rrelu(input, lower, upper, training); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.rrelu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::RReLUFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true)); +/// ``` +inline Tensor rrelu(Tensor input, const RReLUFuncOptions& options = {}) { + return detail::rrelu( + std::move(input), + options.lower(), + options.upper(), + options.training(), + options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor celu(Tensor input, double alpha, bool inplace) { + if (inplace) { + return torch::celu_(input, alpha); + } else { + return torch::celu(input, alpha); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.celu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::CELUFuncOptions` class to +/// learn what optional arguments are supported for this functional. 
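+/// +/// Note: CELU computes `max(0, x) + min(0, alpha * (exp(x / alpha) - 1))`, so `alpha` controls how quickly negative inputs saturate towards `-alpha`.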
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true)); +/// ``` +inline Tensor celu(Tensor input, const CELUFuncOptions& options = {}) { + return detail::celu(std::move(input), options.alpha(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor softplus(const Tensor& input, double beta, double threshold) { + return torch::softplus(input, beta, threshold); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softplus +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SoftplusFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0)); +/// ``` +inline Tensor softplus( + const Tensor& input, + const SoftplusFuncOptions& options = {}) { + return detail::softplus(input, options.beta(), options.threshold()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor softshrink(const Tensor& input, double lambda) { + return torch::softshrink(input, lambda); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softshrink +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SoftshrinkFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softshrink(x, F::SoftshrinkFuncOptions(0.42)); +/// ``` +inline Tensor softshrink( + const Tensor& input, + const SoftshrinkFuncOptions& options = {}) { + return detail::softshrink(input, options.lambda()); +} + +// ============================================================================ + +inline Tensor softsign(const Tensor& input) { + return input / (input.abs() + 1); +} + +// ============================================================================ + +inline Tensor tanhshrink(const Tensor& input) { + return input - input.tanh(); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor threshold( + Tensor input, + double threshold, + double value, + bool inplace) { + if (inplace) { + return torch::threshold_(input, threshold, value); + } else { + return torch::threshold(input, threshold, value); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.threshold +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::ThresholdFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
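+/// +/// Note: `ThresholdFuncOptions(threshold, value)` keeps every element with `x > threshold` and replaces the rest with `value`, i.e. `y = x > threshold ? x : value`.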
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true)); +/// ``` +inline Tensor threshold(Tensor input, const ThresholdFuncOptions& options) { + return detail::threshold( + std::move(input), + options.threshold(), + options.value(), + options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple<Tensor, Tensor> multi_head_attention_forward( + const Tensor& query, + const Tensor& key, + const Tensor& value, + int64_t embed_dim_to_check, + int64_t num_heads, + const Tensor& in_proj_weight, + const Tensor& in_proj_bias, + const Tensor& bias_k, + const Tensor& bias_v, + bool add_zero_attn, + double dropout_p, + const Tensor& out_proj_weight, + const Tensor& out_proj_bias, + bool training = true, + const Tensor& key_padding_mask = {}, + bool need_weights = true, + const Tensor& attn_mask = {}, + bool use_separate_proj_weight = false, + const Tensor& q_proj_weight = {}, + const Tensor& k_proj_weight = {}, + const Tensor& v_proj_weight = {}, + const Tensor& static_k = {}, + const Tensor& static_v = {}, + bool average_attn_weights = true) { + namespace F = torch::nn::functional; + + const auto query_sizes = query.sizes(); + const auto& tgt_len = query_sizes[0]; + const auto& bsz = query_sizes[1]; + const auto& embed_dim = query_sizes[2]; + TORCH_INTERNAL_ASSERT(embed_dim == embed_dim_to_check); + TORCH_INTERNAL_ASSERT(key.sizes() == value.sizes()); + + const auto head_dim = embed_dim / num_heads; + TORCH_CHECK( + head_dim * num_heads == embed_dim, + "embed_dim must be divisible by num_heads"); + const auto scaling = 1 / std::sqrt(head_dim); + + Tensor q, k, v; + if (!use_separate_proj_weight) { + if (torch::equal(query, key) && torch::equal(key, value)) { + // self-attention + const auto chunks = + F::linear(query, in_proj_weight, in_proj_bias).chunk(3, /*dim=*/-1); + q = chunks[0]; + k = chunks[1]; + v = chunks[2]; + } else if (torch::equal(key, value)) { + // encoder-decoder attention + // This is inline in_proj function with in_proj_weight and in_proj_bias + auto _b = in_proj_bias; + auto _start = 0; + auto _end = embed_dim; + auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end); + if (_b.defined()) { + _b = _b.slice(/*dim=*/0, _start, _end); + } + q = F::linear(query, _w, _b); + + if (!key.defined()) { + TORCH_INTERNAL_ASSERT(!value.defined()); + k.reset(); + v.reset(); + } else { + // This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias; + _start = embed_dim; + _w = in_proj_weight.slice(/*dim=*/0, _start); + if (_b.defined()) { + _b = _b.slice(/*dim=*/0, _start); + } + const auto chunks = F::linear(key, _w, _b).chunk(2, /*dim=*/-1); + k = chunks[0]; + v = chunks[1]; + } + } else { + // This is inline in_proj function with in_proj_weight and in_proj_bias + auto _b = in_proj_bias; + auto _start = 0; + auto _end = embed_dim; + auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end); + if (_b.defined()) { + _b = _b.slice(/*dim=*/0, _start, _end); + } + q = F::linear(query, _w, _b); + + // This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias; + _start = embed_dim; + _end = embed_dim * 2; + _w = in_proj_weight.slice(/*dim=*/0, _start, _end); + if (_b.defined()) { + _b = _b.slice(/*dim=*/0, _start, _end); + } + k = F::linear(key, _w, _b); + + // This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias; + 
_start = embed_dim * 2; + _w = in_proj_weight.slice(/*dim=*/0, _start); + if (_b.defined()) { + _b = _b.slice(0, _start); + } + v = F::linear(value, _w, _b); + } + } else { + const auto& q_proj_weight_non_opt = q_proj_weight; + { + const auto sizes = q_proj_weight_non_opt.sizes(); + const auto len1 = sizes[0]; + const auto len2 = sizes[1]; + TORCH_CHECK(len1 == embed_dim && len2 == query.size(-1)); + } + + const auto& k_proj_weight_non_opt = k_proj_weight; + { + const auto sizes = k_proj_weight_non_opt.sizes(); + const auto len1 = sizes[0]; + const auto len2 = sizes[1]; + TORCH_CHECK(len1 == embed_dim && len2 == key.size(-1)); + } + + const auto& v_proj_weight_non_opt = v_proj_weight; + { + const auto sizes = v_proj_weight_non_opt.sizes(); + const auto len1 = sizes[0]; + const auto len2 = sizes[1]; + TORCH_CHECK(len1 == embed_dim && len2 == value.size(-1)); + } + + if (in_proj_bias.defined()) { + q = F::linear( + query, + q_proj_weight_non_opt, + in_proj_bias.slice(/*dim=*/0, 0, embed_dim)); + k = F::linear( + key, + k_proj_weight_non_opt, + in_proj_bias.slice(/*dim=*/0, embed_dim, (embed_dim * 2))); + v = F::linear( + value, + v_proj_weight_non_opt, + in_proj_bias.slice(/*dim=*/0, (embed_dim * 2))); + } else { + q = F::linear(query, q_proj_weight_non_opt, in_proj_bias); + k = F::linear(key, k_proj_weight_non_opt, in_proj_bias); + v = F::linear(value, v_proj_weight_non_opt, in_proj_bias); + } + } + q = q * scaling; + Tensor attn_mask_ = attn_mask; + Tensor key_padding_mask_ = key_padding_mask; + if (bias_k.defined() && bias_v.defined()) { + if (!static_k.defined() && !static_v.defined()) { + k = torch::cat({k, bias_k.repeat({1, bsz, 1})}); + v = torch::cat({v, bias_v.repeat({1, bsz, 1})}); + if (attn_mask_.defined()) { + attn_mask_ = torch::cat( + {attn_mask_, + torch::zeros( + {attn_mask_.size(0), 1}, + at::TensorOptions(attn_mask_.dtype()) + .device(attn_mask_.device()))}, + /*dim=*/1); + } + if (key_padding_mask_.defined()) { + key_padding_mask_ = torch::cat( + {key_padding_mask_, + torch::zeros( + {key_padding_mask_.size(0), 1}, + at::TensorOptions(key_padding_mask_.dtype()) + .device(key_padding_mask_.device()))}, + /*dim=*/1); + } + } else { + TORCH_CHECK(!static_k.defined(), "bias cannot be added to static key."); + TORCH_CHECK(!static_v.defined(), "bias cannot be added to static value."); + } + } else { + TORCH_CHECK(!bias_k.defined()); + TORCH_CHECK(!bias_v.defined()); + } + q = q.contiguous().view({tgt_len, bsz * num_heads, head_dim}).transpose(0, 1); + if (k.defined()) { + k = k.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1); + } + if (v.defined()) { + v = v.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1); + } + if (static_k.defined()) { + TORCH_CHECK(static_k.size(0) == bsz * num_heads); + TORCH_CHECK(static_k.size(2) == head_dim); + k = static_k; + } + if (static_v.defined()) { + TORCH_CHECK(static_v.size(0) == bsz * num_heads); + TORCH_CHECK(static_v.size(2) == head_dim); + v = static_v; + } + auto src_len = k.size(1); + if (key_padding_mask_.defined()) { + TORCH_CHECK(key_padding_mask_.size(0) == bsz); + TORCH_CHECK(key_padding_mask_.size(1) == src_len); + } + if (add_zero_attn) { + src_len += 1; + auto k_sizes = k.sizes().vec(); + k_sizes[1] = 1; + k = torch::cat( + {k, + torch::zeros( + k_sizes, at::TensorOptions(k.dtype()).device(k.device()))}, + /*dim=*/1); + auto v_sizes = v.sizes().vec(); + v_sizes[1] = 1; + v = torch::cat( + {v, + torch::zeros( + v_sizes, at::TensorOptions(v.dtype()).device(v.device()))}, + /*dim=*/1); + if 
(attn_mask_.defined()) { + attn_mask_ = torch::cat( + {attn_mask_, + torch::zeros( + {attn_mask_.size(0), 1}, + at::TensorOptions(attn_mask_.dtype()) + .device(attn_mask_.device()))}, + /*dim=*/1); + } + if (key_padding_mask_.defined()) { + key_padding_mask_ = torch::cat( + {key_padding_mask_, + torch::zeros( + {key_padding_mask_.size(0), 1}, + at::TensorOptions(key_padding_mask_.dtype()) + .device(key_padding_mask_.device()))}, + /*dim=*/1); + } + } + auto attn_output_weights = torch::bmm(q, k.transpose(1, 2)); + TORCH_CHECK( + attn_output_weights.sizes() == + IntArrayRef({bsz * num_heads, tgt_len, src_len})); + if (attn_mask_.defined()) { + attn_mask_ = attn_mask_.unsqueeze(0); + attn_output_weights += attn_mask_; + } + if (key_padding_mask_.defined()) { + attn_output_weights = + attn_output_weights.view({bsz, num_heads, tgt_len, src_len}); + attn_output_weights = AT_DISPATCH_FLOATING_TYPES( + attn_output_weights.scalar_type(), + "attn_output_weights.masked_fill", + [&]() { + return attn_output_weights.masked_fill( + key_padding_mask_.unsqueeze(1).unsqueeze(2), + -std::numeric_limits<scalar_t>::infinity()); + }); + attn_output_weights = + attn_output_weights.view({bsz * num_heads, tgt_len, src_len}); + } + // NOLINTNEXTLINE(bugprone-argument-comment) + attn_output_weights = F::softmax(attn_output_weights, /*dim=*/-1); + attn_output_weights = F::dropout( + attn_output_weights, + F::DropoutFuncOptions().p(dropout_p).training(training)); + auto attn_output = torch::bmm(attn_output_weights, v); + TORCH_CHECK( + attn_output.sizes() == IntArrayRef({bsz * num_heads, tgt_len, head_dim})); + attn_output = + attn_output.transpose(0, 1).contiguous().view({tgt_len, bsz, embed_dim}); + attn_output = F::linear(attn_output, out_proj_weight, out_proj_bias); + if (need_weights) { + attn_output_weights = + attn_output_weights.view({bsz, num_heads, tgt_len, src_len}); + if (average_attn_weights) { + // average attention weights over heads + attn_output_weights = attn_output_weights.sum(/*dim=*/1) / num_heads; + } + return std::make_tuple(attn_output, attn_output_weights); + } else { + return std::make_tuple(attn_output, Tensor()); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +inline std::tuple<Tensor, Tensor> multi_head_attention_forward( + const Tensor& query, + const Tensor& key, + const Tensor& value, + const MultiheadAttentionForwardFuncOptions& options) { + return detail::multi_head_attention_forward( + query, + key, + value, + options.embed_dim_to_check(), + options.num_heads(), + options.in_proj_weight(), + options.in_proj_bias(), + options.bias_k(), + options.bias_v(), + options.add_zero_attn(), + options.dropout_p(), + options.out_proj_weight(), + options.out_proj_bias(), + options.training(), + options.key_padding_mask(), + options.need_weights(), + options.attn_mask(), + options.use_separate_proj_weight(), + options.q_proj_weight(), + options.k_proj_weight(), + options.v_proj_weight(), + options.static_k(), + options.static_v(), + options.average_attn_weights()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h new file mode 100644 index 0000000000000000000000000000000000000000..e7b7325157616a0f90ac70dc944ccdaede9cc2d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h @@ -0,0 +1,83
@@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor batch_norm( + const Tensor& input, + const Tensor& running_mean, + const Tensor& running_var, + Tensor weight, + Tensor bias, + bool training, + c10::optional<double> momentum, + double eps) { + TORCH_CHECK( + input.dim() >= 2, + "Expected at least 2 input dimensions, but got ", + input.dim()); + if (training) { + auto size = input.sizes(); + int64_t size_prods = size[0]; + for (const auto i : c10::irange(size.size() - 2)) { + size_prods *= size[i + 2]; + } + TORCH_CHECK( + size_prods != 1, + "Expected more than 1 value per channel when training, got input size ", + size); + } + + return torch::batch_norm( + input, + weight, + bias, + running_mean, + running_var, + training, + momentum.value(), + eps, + at::globalContext().userEnabledCuDNN()); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.batch_norm +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::BatchNormFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::batch_norm(input, mean, variance, +/// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false)); +/// ``` +inline Tensor batch_norm( + const Tensor& input, + const Tensor& running_mean, + const Tensor& running_var, + const BatchNormFuncOptions& options = {}) { + return detail::batch_norm( + input, + running_mean, + running_var, + options.weight(), + options.bias(), + options.training(), + options.momentum(), + options.eps()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/conv.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/conv.h new file mode 100644 index 0000000000000000000000000000000000000000..22f8d04ab73451a5b4f01b625ec8614149fc8adb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/conv.h @@ -0,0 +1,301 @@ +#pragma once + +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline std::string padding_unwrap(enumtype::kValid) { + return "valid"; +} + +inline std::string padding_unwrap(enumtype::kSame) { + return "same"; +} + +template <size_t D> +IntArrayRef padding_unwrap(const ExpandingArray<D>& array) { + return array; +} + +inline Tensor conv1d( + const Tensor& input, + const Tensor& weight, + const Tensor& bias, + ExpandingArray<1> stride, + const Conv1dFuncOptions::padding_t& padding, + ExpandingArray<1> dilation, + int64_t groups) { + return std::visit( + [&](const auto& pad) { + return torch::conv1d( + input, weight, bias, stride, padding_unwrap(pad), dilation, groups); + }, + padding); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv1d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::Conv1dFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
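+/// +/// Note: besides a numeric padding amount, the `padding` option also accepts the `torch::kValid` and `torch::kSame` enums, which `detail::padding_unwrap` above forwards to `torch::conv1d` as the strings "valid"/"same". A sketch (assuming suitably shaped tensors `x` and `weight`; `kSame` requires a stride of 1): +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1).padding(torch::kSame)); +/// ```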
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1)); +/// ``` +inline Tensor conv1d( + const Tensor& input, + const Tensor& weight, + const Conv1dFuncOptions& options = {}) { + return detail::conv1d( + input, + weight, + options.bias(), + options.stride(), + options.padding(), + options.dilation(), + options.groups()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor conv2d( + const Tensor& input, + const Tensor& weight, + const Tensor& bias, + ExpandingArray<2> stride, + const Conv2dFuncOptions::padding_t& padding, + ExpandingArray<2> dilation, + int64_t groups) { + return std::visit( + [&](const auto& pad) { + return torch::conv2d( + input, weight, bias, stride, padding_unwrap(pad), dilation, groups); + }, + padding); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::Conv2dFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1)); +/// ``` +inline Tensor conv2d( + const Tensor& input, + const Tensor& weight, + const Conv2dFuncOptions& options = {}) { + return detail::conv2d( + input, + weight, + options.bias(), + options.stride(), + options.padding(), + options.dilation(), + options.groups()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor conv3d( + const Tensor& input, + const Tensor& weight, + const Tensor& bias, + ExpandingArray<3> stride, + const Conv3dFuncOptions::padding_t& padding, + ExpandingArray<3> dilation, + int64_t groups) { + return std::visit( + [&](const auto& pad) { + return torch::conv3d( + input, weight, bias, stride, padding_unwrap(pad), dilation, groups); + }, + padding); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::Conv3dFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1)); +/// ``` +inline Tensor conv3d( + const Tensor& input, + const Tensor& weight, + const Conv3dFuncOptions& options = {}) { + return detail::conv3d( + input, + weight, + options.bias(), + options.stride(), + options.padding(), + options.dilation(), + options.groups()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor conv_transpose1d( + const Tensor& input, + const Tensor& weight, + const Tensor& bias, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef output_padding, + int64_t groups, + IntArrayRef dilation) { + return torch::conv_transpose1d( + input, weight, bias, stride, padding, output_padding, groups, dilation); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose1d +/// about the exact behavior of this functional. 
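+/// +/// For reference, the output length of a 1D transposed convolution is `(L_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1`.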
+/// +/// See the documentation for +/// `torch::nn::functional::ConvTranspose1dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1)); +/// ``` +inline Tensor conv_transpose1d( + const Tensor& input, + const Tensor& weight, + const ConvTranspose1dFuncOptions& options = {}) { + return detail::conv_transpose1d( + input, + weight, + options.bias(), + options.stride(), + options.padding(), + options.output_padding(), + options.groups(), + options.dilation()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor conv_transpose2d( + const Tensor& input, + const Tensor& weight, + const Tensor& bias, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef output_padding, + int64_t groups, + IntArrayRef dilation) { + return torch::conv_transpose2d( + input, weight, bias, stride, padding, output_padding, groups, dilation); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose2d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::ConvTranspose2dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1)); +/// ``` +inline Tensor conv_transpose2d( + const Tensor& input, + const Tensor& weight, + const ConvTranspose2dFuncOptions& options = {}) { + return detail::conv_transpose2d( + input, + weight, + options.bias(), + options.stride(), + options.padding(), + options.output_padding(), + options.groups(), + options.dilation()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor conv_transpose3d( + const Tensor& input, + const Tensor& weight, + const Tensor& bias, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef output_padding, + int64_t groups, + IntArrayRef dilation) { + return torch::conv_transpose3d( + input, weight, bias, stride, padding, output_padding, groups, dilation); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose3d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::ConvTranspose3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
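+/// +/// Note: for transposed convolutions the `weight` tensor is laid out as `(in_channels, out_channels / groups, kT, kH, kW)`, i.e. with the input-channel dimension first, the reverse of regular convolutions.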
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1)); +/// ``` +inline Tensor conv_transpose3d( + const Tensor& input, + const Tensor& weight, + const ConvTranspose3dFuncOptions& options = {}) { + return detail::conv_transpose3d( + input, + weight, + options.bias(), + options.stride(), + options.padding(), + options.output_padding(), + options.groups(), + options.dilation()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h new file mode 100644 index 0000000000000000000000000000000000000000..27914017fef22d93823a6b8c7330eab01a835706 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h @@ -0,0 +1,88 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor cosine_similarity( + const Tensor& x1, + const Tensor& x2, + int64_t dim, + double eps) { + return torch::cosine_similarity(x1, x2, dim, eps); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_similarity +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::CosineSimilarityFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cosine_similarity(input1, input2, +/// F::CosineSimilarityFuncOptions().dim(1)); +/// ``` +inline Tensor cosine_similarity( + const Tensor& x1, + const Tensor& x2, + const CosineSimilarityFuncOptions& options = {}) { + return detail::cosine_similarity(x1, x2, options.dim(), options.eps()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor pairwise_distance( + const Tensor& x1, + const Tensor& x2, + double p, + double eps, + bool keepdim) { + return torch::pairwise_distance(x1, x2, p, eps, keepdim); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pairwise_distance +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::PairwiseDistanceFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1)); +/// ``` +inline Tensor pairwise_distance( + const Tensor& x1, + const Tensor& x2, + const PairwiseDistanceFuncOptions& options = {}) { + return detail::pairwise_distance( + x1, x2, options.p(), options.eps(), options.keepdim()); +} + +// ============================================================================ + +/// Computes the p-norm distance between every pair of row vectors in the input. +/// This function will be faster if the rows are contiguous. 
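+/// +/// Example (for an `(n, m)` input the result is a condensed distance vector of length `n * (n - 1) / 2`): +/// ``` +/// namespace F = torch::nn::functional; +/// F::pdist(torch::randn({4, 8}), /*p=*/2.0); +/// ```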
+inline Tensor pdist(const Tensor& input, double p = 2.0) { + return torch::pdist(input, p); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h new file mode 100644 index 0000000000000000000000000000000000000000..cb233e5a01065509da1c91e8d1855bab5308b7ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h @@ -0,0 +1,234 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor dropout(Tensor input, double p, bool training, bool inplace) { + TORCH_CHECK( + p >= 0. && p <= 1., + "dropout probability has to be between 0 and 1, but got ", + p); + if (inplace) { + return torch::dropout_(input, p, training); + } else { + return torch::dropout(input, p, training); + } +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::DropoutFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout(input, F::DropoutFuncOptions().p(0.5)); +/// ``` +inline Tensor dropout(Tensor input, const DropoutFuncOptions& options = {}) { + return detail::dropout( + std::move(input), options.p(), options.training(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +template <int64_t unbatched_dim, int64_t batched_dim> +inline Tensor _dropoutNd_helper( + Tensor input, + double p, + bool training, + bool inplace, + const char* fn_name) { + TORCH_CHECK( + p >= 0. && p <= 1., + "dropout probability has to be between 0 and 1, but got ", + p); + + auto inp_dim = input.dim(); + auto is_batched = inp_dim == batched_dim; + if (!is_batched) { + if (inplace) { + input = input.unsqueeze_(0); + } else { + input = input.unsqueeze(0); + } + } + + Tensor result; + if (inplace) { + result = torch::feature_dropout_(input, p, training); + } else { + result = torch::feature_dropout(input, p, training); + } + + if (!is_batched) { + if (inplace) { + result = result.squeeze_(0); + } else { + result = result.squeeze(0); + } + } + return result; +} + +inline Tensor dropout2d(Tensor input, double p, bool training, bool inplace) { + return _dropoutNd_helper<3, 4>( + std::move(input), p, training, inplace, "dropout2d"); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::Dropout2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
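+/// +/// Note: unlike elementwise `dropout`, `dropout2d` zeroes entire channels (feature maps) at once via `torch::feature_dropout`; as `_dropoutNd_helper` above shows, an unbatched 3D input is temporarily unsqueezed to 4D so both cases share one code path.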
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5)); +/// ``` +inline Tensor dropout2d( + Tensor input, + const Dropout2dFuncOptions& options = {}) { + return detail::dropout2d( + std::move(input), options.p(), options.training(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor dropout3d(Tensor input, double p, bool training, bool inplace) { + return _dropoutNd_helper<4, 5>( + std::move(input), p, training, inplace, "dropout3d"); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::Dropout3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5)); +/// ``` +inline Tensor dropout3d( + Tensor input, + const Dropout3dFuncOptions& options = {}) { + return detail::dropout3d( + std::move(input), options.p(), options.training(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor alpha_dropout( + Tensor input, + double p, + bool training, + bool inplace) { + if (p < 0. || p > 1.) { + TORCH_CHECK( + false, "dropout probability has to be between 0 and 1, but got ", p); + } + return inplace ? torch::alpha_dropout_(input, p, training) + : torch::alpha_dropout(input, p, training); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.alpha_dropout +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::AlphaDropoutFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::alpha_dropout(input, +/// F::AlphaDropoutFuncOptions().p(0.5).training(false)); +/// ``` +inline Tensor alpha_dropout( + Tensor input, + const AlphaDropoutFuncOptions& options = {}) { + return detail::alpha_dropout( + std::move(input), options.p(), options.training(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor feature_alpha_dropout( + Tensor input, + double p, + bool training, + bool inplace) { + if (p < 0. || p > 1.) { + TORCH_CHECK( + false, "dropout probability has to be between 0 and 1, but got ", p); + } + return inplace ? torch::feature_alpha_dropout_(input, p, training) + : torch::feature_alpha_dropout(input, p, training); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.feature_alpha_dropout +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::FeatureAlphaDropoutFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
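+/// +/// Note: alpha dropout is designed for SELU networks; rather than zeroing activations it masks them to the negative saturation value so that the output keeps approximately zero mean and unit variance, and this `feature_` variant applies the masking to whole channels.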
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::feature_alpha_dropout(input, +/// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false)); +/// ``` +inline Tensor feature_alpha_dropout( + Tensor input, + const FeatureAlphaDropoutFuncOptions& options = {}) { + return detail::feature_alpha_dropout( + std::move(input), options.p(), options.training(), options.inplace()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h new file mode 100644 index 0000000000000000000000000000000000000000..37f373774e91357e2def5fc319f28c6da70a1411 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h @@ -0,0 +1,211 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +inline Tensor one_hot(const Tensor& tensor, int64_t num_classes = -1) { + return torch::one_hot(tensor, num_classes); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline void _no_grad_embedding_renorm_( + Tensor weight, + const Tensor& input, + float max_norm, + float norm_type) { + torch::NoGradGuard no_grad; + torch::embedding_renorm_(weight, input, max_norm, norm_type); +} + +inline Tensor embedding( + const Tensor& input, + const Tensor& weight, + c10::optional<int64_t> padding_idx, + c10::optional<double> max_norm, + double norm_type, + bool scale_grad_by_freq, + bool sparse) { + auto input_ = input; + + if (padding_idx != c10::nullopt) { + if (*padding_idx > 0) { + TORCH_CHECK( + *padding_idx < weight.size(0), + "Padding_idx must be within num_embeddings"); + } else if (*padding_idx < 0) { + TORCH_CHECK( + *padding_idx >= -weight.size(0), + "Padding_idx must be within num_embeddings"); + padding_idx = weight.size(0) + *padding_idx; + } + } else { + padding_idx = -1; + } + + if (max_norm != c10::nullopt) { + input_ = input_.contiguous(); + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type); + } + return torch::embedding( + weight, input_, *padding_idx, scale_grad_by_freq, sparse); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::EmbeddingFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
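+/// +/// Note: per the implementation above, a negative `padding_idx` is wrapped around to `weight.size(0) + padding_idx`, and when `max_norm` is set the referenced rows of `weight` are renormalized in place (under a `NoGradGuard`).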
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::embedding(input, weight, +/// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true)); +/// ``` +inline Tensor embedding( + const Tensor& input, + const Tensor& weight, + const EmbeddingFuncOptions& options = {}) { + return detail::embedding( + input, + weight, + options.padding_idx(), + options.max_norm(), + options.norm_type(), + options.scale_grad_by_freq(), + options.sparse()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor embedding_bag( + const Tensor& input, + const Tensor& weight, + const Tensor& offsets, + c10::optional<double> max_norm, + double norm_type, + bool scale_grad_by_freq, + EmbeddingBagMode mode, + bool sparse, + const Tensor& per_sample_weights, + bool include_last_offset, + c10::optional<int64_t> padding_idx) { + auto input_ = input; + auto offsets_ = offsets; + auto per_sample_weights_ = per_sample_weights; + TORCH_CHECK( + !per_sample_weights_.defined() || + input_.sizes() == per_sample_weights_.sizes(), + "embedding_bag: If per_sample_weights (", + per_sample_weights_.sizes(), + ") is not null, then it must have the same shape as the input (", + input_.sizes(), + ")"); + if (input_.dim() == 2) { + TORCH_CHECK( + !offsets_.defined(), + "If input is 2D, then offsets has to be null, as input is treated as a mini-batch of fixed length sequences. However, found offsets of type Tensor"); + offsets_ = torch::arange( + 0, + input_.numel(), + input_.size(1), + torch::TensorOptions().dtype(torch::kLong).device(input_.device())); + input_ = input_.reshape(-1); + if (per_sample_weights_.defined()) { + per_sample_weights_ = per_sample_weights_.reshape(-1); + } + } else if (input_.dim() == 1) { + TORCH_CHECK( + offsets_.defined(), "offsets has to be a 1D Tensor but got null"); + TORCH_CHECK(offsets_.dim() == 1, "offsets has to be a 1D Tensor"); + } else { + TORCH_CHECK( + false, + "input has to be 1D or 2D Tensor, but got Tensor of dimension ", + input_.dim()); + } + + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int mode_enum; + if (std::holds_alternative<enumtype::kSum>(mode)) { + mode_enum = 0; + } else if (std::holds_alternative<enumtype::kMean>(mode)) { + mode_enum = 1; + } else if (std::holds_alternative<enumtype::kMax>(mode)) { + mode_enum = 2; + TORCH_CHECK( + !scale_grad_by_freq, + "max mode does not support scaling the gradient by the frequency"); + TORCH_CHECK(!sparse, "max mode does not support sparse weights"); + } else { + TORCH_CHECK(false, "mode has to be one of sum, mean or max"); + } + + if (max_norm != c10::nullopt) { + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type); + } + + TORCH_CHECK( + !per_sample_weights_.defined() || std::get_if<enumtype::kSum>(&mode), + "embedding_bag: per_sample_weights was not null. ", + "per_sample_weights is only supported for mode='kSum' (got mode='", + torch::enumtype::get_enum_name(mode), + "'). Please open a feature request on GitHub."); + + return std::get<0>(torch::embedding_bag( + weight, + input_, + offsets_, + scale_grad_by_freq, + mode_enum, + sparse, + per_sample_weights_, + include_last_offset, + padding_idx)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding_bag +/// about the exact behavior of this functional. 
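+/// +/// Note: `input` may be 2D `(B, N)`, in which case every row is one fixed-length bag and `offsets` must be left undefined (the implementation synthesizes them with `torch::arange`), or 1D with an explicit 1D `offsets` tensor marking where each bag starts.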
+/// +/// See the documentation for `torch::nn::functional::EmbeddingBagFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::embedding_bag(input, weight, +/// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets)); +/// ``` +inline Tensor embedding_bag( + const Tensor& input, + const Tensor& weight, + const EmbeddingBagFuncOptions& options = {}) { + return detail::embedding_bag( + input, + weight, + options.offsets(), + options.max_norm(), + options.norm_type(), + options.scale_grad_by_freq(), + options.mode(), + options.sparse(), + options.per_sample_weights(), + options.include_last_offset(), + options.padding_idx()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h new file mode 100644 index 0000000000000000000000000000000000000000..cd47138e32e9e63ffd79a53cf5ea4a22cf411140 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h @@ -0,0 +1,102 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor fold( + const Tensor& input, + ExpandingArray<2> output_size, + ExpandingArray<2> kernel_size, + ExpandingArray<2> dilation, + ExpandingArray<2> padding, + ExpandingArray<2> stride) { + if (input.dim() == 3 || input.dim() == 2) { + return torch::col2im( + input, output_size, kernel_size, dilation, padding, stride); + } else { + TORCH_CHECK( + false, + "Input Error: Only unbatched (2D) or batched (3D) input Tensors are supported " + "(got ", + input.dim(), + "D)"); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.fold +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::FoldFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2})); +/// ``` +inline Tensor fold(const Tensor& input, const FoldFuncOptions& options) { + return detail::fold( + input, + options.output_size(), + options.kernel_size(), + options.dilation(), + options.padding(), + options.stride()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor unfold( + const Tensor& input, + ExpandingArray<2> kernel_size, + ExpandingArray<2> dilation, + ExpandingArray<2> padding, + ExpandingArray<2> stride) { + if (input.dim() == 4) { + return torch::im2col(input, kernel_size, dilation, padding, stride); + } else { + TORCH_CHECK( + false, + "Input Error: Only 4D input Tensors are supported " + "(got ", + input.dim(), + "D)"); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.unfold +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::UnfoldFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
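+/// +/// Note: `unfold` expects a 4D `(N, C, H, W)` input and produces an `(N, C * prod(kernel_size), L)` output, where `L` is the number of sliding-window positions; `fold` above is its (partial) inverse.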
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2)); +/// ``` +inline Tensor unfold(const Tensor& input, const UnfoldFuncOptions& options) { + return detail::unfold( + input, + options.kernel_size(), + options.dilation(), + options.padding(), + options.stride()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h new file mode 100644 index 0000000000000000000000000000000000000000..bfa42a32f7940ecad7111e432bd1c1ce8b6a48eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h @@ -0,0 +1,63 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor instance_norm( + const Tensor& input, + const Tensor& running_mean, + const Tensor& running_var, + const Tensor& weight, + const Tensor& bias, + bool use_input_stats, + double momentum, + double eps) { + return torch::instance_norm( + input, + weight, + bias, + running_mean, + running_var, + use_input_stats, + momentum, + eps, + at::globalContext().userEnabledCuDNN()); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.instance_norm +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::InstanceNormFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
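+/// +/// Note: instance norm normalizes each sample over its spatial dimensions, per channel; with `use_input_stats(true)` (the default) statistics are computed from the input itself, while `running_mean`/`running_var`, if supplied, are updated using `momentum`.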
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::instance_norm(input, +/// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5)); +/// ``` +inline Tensor instance_norm( + const Tensor& input, + const InstanceNormFuncOptions& options = {}) { + return detail::instance_norm( + input, + options.running_mean(), + options.running_var(), + options.weight(), + options.bias(), + options.use_input_stats(), + options.momentum(), + options.eps()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h new file mode 100644 index 0000000000000000000000000000000000000000..ffeafcd712af0421a23fe6f344f032a35e40c674 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +inline Tensor bilinear( + const Tensor& input1, + const Tensor& input2, + const Tensor& weight, + const Tensor& bias = Tensor()) { + return torch::bilinear(input1, input2, weight, bias); +} + +// ============================================================================ + +inline Tensor linear( + const Tensor& input, + const Tensor& weight, + const Tensor& bias = {}) { + if (input.dim() == 2 && bias.defined()) { + // fused op is marginally faster + return torch::addmm(bias, input, weight.t()); + } else { + auto output = input.matmul(weight.t()); + if (bias.defined()) { + output += bias; + } + return output; + } +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h new file mode 100644 index 0000000000000000000000000000000000000000..691ba4ce3041e351239d2a5c2cd16ea614f57319 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h @@ -0,0 +1,1044 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor l1_loss( + const Tensor& input, + const Tensor& target, + L1LossFuncOptions::reduction_t reduction) { + return torch::l1_loss(input, target, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.l1_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::L1LossFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
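+/// +/// Note: `reduction` is one of `torch::kNone` (return the elementwise losses), `torch::kMean` (the default), or `torch::kSum`.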
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone)); +/// ``` +inline Tensor l1_loss( + const Tensor& input, + const Tensor& target, + const L1LossFuncOptions& options = {}) { + return detail::l1_loss(input, target, options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor kl_div( + const Tensor& input, + const Tensor& target, + KLDivFuncOptions::reduction_t reduction, + bool log_target = false) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + torch::Reduction::Reduction reduction_enum; + + if (std::holds_alternative<enumtype::kMean>(reduction)) { + TORCH_WARN( + "reduction: 'mean' divides the total loss by both the batch size and the support size. " + "'batchmean' divides only by the batch size, and aligns with the KL div math definition. " + "'mean' will be changed to behave the same as 'batchmean' in the next major release."); + } + + // special case for batchmean + if (std::holds_alternative<enumtype::kBatchMean>(reduction)) { + reduction_enum = torch::Reduction::Sum; + } else { + reduction_enum = enumtype::reduction_get_enum(reduction); + } + + auto reduced = torch::kl_div(input, target, reduction_enum, log_target); + + if (std::holds_alternative<enumtype::kBatchMean>(reduction) && + input.dim() != 0) { + reduced = reduced / input.sizes()[0]; + } + + return reduced; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.kl_div +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::KLDivFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::kl_div(input, target, +/// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false)); +/// ``` +inline Tensor kl_div( + const Tensor& input, + const Tensor& target, + const KLDivFuncOptions& options = {}) { + return detail::kl_div( + input, target, options.reduction(), options.log_target()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor mse_loss( + const Tensor& input, + const Tensor& target, + MSELossFuncOptions::reduction_t reduction) { + if (!(target.sizes() == input.sizes())) { + TORCH_WARN( + "Using a target size (", + target.sizes(), + ") that is different to the input size (", + input.sizes(), + "). ", + "This will likely lead to incorrect results due to broadcasting. ", + "Please ensure they have the same size."); + } + std::vector<Tensor> broadcast_tensors = + torch::broadcast_tensors({input, target}); + auto expanded_input = broadcast_tensors[0]; + auto expanded_target = broadcast_tensors[1]; + return torch::mse_loss( + expanded_input, expanded_target, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.mse_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MSELossFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
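+/// +/// Note: as `detail::mse_loss` above shows, `input` and `target` are first broadcast to a common shape, and a size mismatch emits a warning since it usually indicates a bug rather than intentional broadcasting.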
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone)); +/// ``` +inline Tensor mse_loss( + const Tensor& input, + const Tensor& target, + const MSELossFuncOptions& options = {}) { + return detail::mse_loss(input, target, options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor binary_cross_entropy( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + BinaryCrossEntropyFuncOptions::reduction_t reduction) { + auto reduction_enum = enumtype::reduction_get_enum(reduction); + + if (target.sizes() != input.sizes()) { + TORCH_CHECK( + false, + "Using a target size (", + target.sizes(), + ") ", + "that is different to the input size (", + input.sizes(), + ") is deprecated. ", + "Please ensure they have the same size."); + } + + auto weight_ = weight; + if (weight_.defined()) { + auto new_size = at::infer_size(target.sizes(), weight_.sizes()); + weight_ = weight_.expand(new_size); + } + + return torch::binary_cross_entropy(input, target, weight_, reduction_enum); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::BinaryCrossEntropyFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy(input, target, +/// F::BinaryCrossEntropyFuncOptions().weight(weight)); +/// ``` +inline Tensor binary_cross_entropy( + const Tensor& input, + const Tensor& target, + const BinaryCrossEntropyFuncOptions& options = {}) { + return detail::binary_cross_entropy( + input, target, options.weight(), options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor hinge_embedding_loss( + const Tensor& input, + const Tensor& target, + double margin, + HingeEmbeddingLossFuncOptions::reduction_t reduction) { + return torch::hinge_embedding_loss( + input, target, margin, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hinge_embedding_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::HingeEmbeddingLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
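+///
+/// Recall the per-element definition: the loss is `x` where the label is `1`
+/// and `max(0, margin - x)` where the label is `-1`. A sketch (values
+/// invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto input = torch::tensor({0.2, 1.5});
+/// auto target = torch::tensor({1.0, -1.0});
+/// // 0.2 for the first element, max(0, 2 - 1.5) = 0.5 for the second
+/// auto loss = F::hinge_embedding_loss(
+///     input, target, F::HingeEmbeddingLossFuncOptions().margin(2));
+/// ```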
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hinge_embedding_loss(input, target, +/// F::HingeEmbeddingLossFuncOptions().margin(2)); +/// ``` +inline Tensor hinge_embedding_loss( + const Tensor& input, + const Tensor& target, + const HingeEmbeddingLossFuncOptions& options = {}) { + return detail::hinge_embedding_loss( + input, target, options.margin(), options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor multi_margin_loss( + const Tensor& input, + const Tensor& target, + int64_t p, + double margin, + const Tensor& weight, + MultiMarginLossFuncOptions::reduction_t reduction) { + TORCH_CHECK(p == 1 || p == 2, "only p == 1 and p == 2 supported"); + if (weight.defined()) { + TORCH_CHECK(weight.dim() == 1, "weight must be one-dimensional"); + } + + return torch::multi_margin_loss( + input, + target, + p, + margin, + weight, + enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multi_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::MultiMarginLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multi_margin_loss(input, target, +/// F::MultiMarginLossFuncOptions().margin(2).weight(weight)); +/// ``` +inline Tensor multi_margin_loss( + const Tensor& input, + const Tensor& target, + const MultiMarginLossFuncOptions& options = {}) { + return detail::multi_margin_loss( + input, + target, + options.p(), + options.margin(), + options.weight(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor cosine_embedding_loss( + const Tensor& input1, + const Tensor& input2, + const Tensor& target, + double margin, + CosineEmbeddingLossFuncOptions::reduction_t reduction) { + return torch::cosine_embedding_loss( + input1, input2, target, margin, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_embedding_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::CosineEmbeddingLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cosine_embedding_loss(input1, input2, target, +/// F::CosineEmbeddingLossFuncOptions().margin(0.5)); +/// ``` +inline Tensor cosine_embedding_loss( + const Tensor& input1, + const Tensor& input2, + const Tensor& target, + const CosineEmbeddingLossFuncOptions& options = {}) { + return detail::cosine_embedding_loss( + input1, input2, target, options.margin(), options.reduction()); +} + +// ============================================================================ + +inline Tensor _smooth_l1_loss( + const Tensor& input, + const Tensor& target, + double beta = 1.) 
{ + auto t = torch::abs(input - target); + return torch::where(t < beta, 0.5 * torch::pow(t, 2) / beta, t - 0.5 * beta); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor smooth_l1_loss( + const Tensor& input, + const Tensor& target, + SmoothL1LossFuncOptions::reduction_t reduction, + c10::optional beta_opt = c10::nullopt) { + if (target.sizes() != input.sizes()) { + TORCH_WARN( + "Using a target size (", + target.sizes(), + ") that is different to the input size (", + input.sizes(), + "). ", + "This will likely lead to incorrect results due to broadcasting. ", + "Please ensure they have the same size."); + } + double beta = beta_opt.value_or(1.0); + + std::vector expanded_tensors = + torch::broadcast_tensors({input, target}); + return torch::smooth_l1_loss( + expanded_tensors[0], + expanded_tensors[1], + enumtype::reduction_get_enum(reduction), + beta); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SmoothL1LossFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone)); +/// ``` +inline Tensor smooth_l1_loss( + const Tensor& input, + const Tensor& target, + const SmoothL1LossFuncOptions& options = {}) { + return detail::smooth_l1_loss( + input, target, options.reduction(), options.beta()); +} + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss +/// about the exact behavior of this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::smooth_l1_loss(input, target, /*options=*/torch::kNone, /*beta=*/0.5); +/// ``` +inline Tensor smooth_l1_loss( + const Tensor& input, + const Tensor& target, + const SmoothL1LossFuncOptions& options, + double beta) { + TORCH_CHECK( + options.beta() == c10::nullopt, + "expected beta not to be provided in 'options', but got ", + options.beta().value()); + return detail::smooth_l1_loss(input, target, options.reduction(), beta); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor huber_loss( + const Tensor& input, + const Tensor& target, + HuberLossFuncOptions::reduction_t reduction, + double delta = 1.) { + if (target.sizes() != input.sizes()) { + TORCH_WARN( + "Using a target size (", + target.sizes(), + ") that is different to the input size (", + input.sizes(), + "). ", + "This will likely lead to incorrect results due to broadcasting. ", + "Please ensure they have the same size."); + } + + std::vector expanded_tensors = + torch::broadcast_tensors({input, target}); + return torch::huber_loss( + expanded_tensors[0], + expanded_tensors[1], + enumtype::reduction_get_enum(reduction), + delta); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.huber_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::HuberLossFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
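+///
+/// Per element, with residual `d = input - target`, the loss is
+/// `0.5 * d^2` when `|d| <= delta` and `delta * (|d| - 0.5 * delta)`
+/// otherwise, so it is quadratic near zero and linear in the tails. A sketch
+/// (values invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// // |d| = {0.4, 2.0} with delta = 1.0: 0.5 * 0.16 = 0.08 and 1.0 * 1.5 = 1.5
+/// auto loss = F::huber_loss(
+///     torch::tensor({1.4, 3.0}),
+///     torch::tensor({1.0, 1.0}),
+///     F::HuberLossFuncOptions().reduction(torch::kNone));
+/// ```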
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::huber_loss(input, target, +/// F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5)); +/// ``` +inline Tensor huber_loss( + const Tensor& input, + const Tensor& target, + const HuberLossFuncOptions& options = {}) { + return detail::huber_loss( + input, target, options.reduction(), options.delta()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor multilabel_margin_loss( + const Tensor& input, + const Tensor& target, + MultilabelMarginLossFuncOptions::reduction_t reduction) { + return torch::multilabel_margin_loss( + input, target, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::MultilabelMarginLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_margin_loss(input, target, +/// F::MultilabelMarginLossFuncOptions(torch::kNone)); +/// ``` +inline Tensor multilabel_margin_loss( + const Tensor& input, + const Tensor& target, + const MultilabelMarginLossFuncOptions& options = {}) { + return detail::multilabel_margin_loss(input, target, options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor soft_margin_loss( + const Tensor& input, + const Tensor& target, + SoftMarginLossFuncOptions::reduction_t reduction) { + return torch::soft_margin_loss( + input, target, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.soft_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SoftMarginLossFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
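+///
+/// Per element this computes `log(1 + exp(-y * x))` for labels `y` in
+/// `{-1, 1}`. A sketch (values invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto input = torch::tensor({2.0, -0.5});
+/// auto target = torch::tensor({1.0, -1.0});  // both correctly classified
+/// auto loss = F::soft_margin_loss(
+///     input, target, F::SoftMarginLossFuncOptions(torch::kMean));
+/// ```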
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::soft_margin_loss(input, target, +/// F::SoftMarginLossFuncOptions(torch::kNone)); +/// ``` +inline Tensor soft_margin_loss( + const Tensor& input, + const Tensor& target, + const SoftMarginLossFuncOptions& options = {}) { + return detail::soft_margin_loss(input, target, options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor multilabel_soft_margin_loss( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + MultilabelSoftMarginLossFuncOptions::reduction_t reduction) { + auto loss = + -(target * torch::log_sigmoid(input) + + (1 - target) * torch::log_sigmoid(-input)); + if (weight.defined()) { + loss = loss * weight; + } + + auto class_dim = input.dim() - 1; + auto C = input.size(class_dim); + loss = loss.sum(class_dim) / C; // only return N loss values + + Tensor ret; + + if (std::holds_alternative(reduction)) { + ret = loss; + } else if (std::holds_alternative(reduction)) { + ret = loss.mean(); + } else if (std::holds_alternative(reduction)) { + ret = loss.sum(); + } else { + ret = input; + TORCH_INTERNAL_ASSERT( + false, enumtype::get_enum_name(reduction), " is not valid"); + } + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_soft_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::MultilabelSoftMarginLossFuncOptions` class to learn +/// what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_soft_margin_loss(input, target, +/// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight)); +/// ``` +inline Tensor multilabel_soft_margin_loss( + const Tensor& input, + const Tensor& target, + const MultilabelSoftMarginLossFuncOptions& options = {}) { + return detail::multilabel_soft_margin_loss( + input, target, options.weight(), options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor triplet_margin_loss( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative, + double margin, + double p, + double eps, + bool swap, + TripletMarginLossFuncOptions::reduction_t reduction) { + return torch::triplet_margin_loss( + anchor, + positive, + negative, + margin, + p, + eps, + swap, + enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::TripletMarginLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
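+///
+/// The loss per triplet is `max(d(a, p) - d(a, n) + margin, 0)`, where `d` is
+/// the pairwise `p`-norm distance, so it is zero once the negative is at
+/// least `margin` farther from the anchor than the positive. A sketch
+/// (shapes invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto anchor = torch::randn({8, 16});
+/// auto positive = torch::randn({8, 16});
+/// auto negative = torch::randn({8, 16});
+/// auto loss = F::triplet_margin_loss(
+///     anchor, positive, negative,
+///     F::TripletMarginLossFuncOptions().margin(0.5).p(2));
+/// ```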
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_loss(anchor, positive, negative, +/// F::TripletMarginLossFuncOptions().margin(1.0)); +/// ``` +inline Tensor triplet_margin_loss( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative, + const TripletMarginLossFuncOptions& options = {}) { + return detail::triplet_margin_loss( + anchor, + positive, + negative, + options.margin(), + options.p(), + options.eps(), + options.swap(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor triplet_margin_with_distance_loss( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative, + c10::optional + distance_function, + double margin, + bool swap, + TripletMarginWithDistanceLossFuncOptions::reduction_t reduction) { + Tensor dist_pos, dist_neg; + if (distance_function.has_value()) { + auto distance_function_impl = distance_function.value(); + dist_pos = distance_function_impl(anchor, positive); + dist_neg = distance_function_impl(anchor, negative); + } else { + dist_pos = pairwise_distance(anchor, positive); + dist_neg = pairwise_distance(anchor, negative); + } + + if (swap) { + Tensor dist_swap; + if (distance_function.has_value()) { + dist_swap = distance_function.value()(positive, negative); + } else { + dist_swap = pairwise_distance(positive, negative); + } + dist_neg = torch::min(dist_neg, dist_swap); + } + + auto loss = torch::clamp_min(dist_pos - dist_neg + margin, 0); + + Tensor ret; + if (std::holds_alternative(reduction)) { + ret = loss; + } else if (std::holds_alternative(reduction)) { + ret = loss.mean(); + } else if (std::holds_alternative(reduction)) { + ret = loss.sum(); + } else { + ret = anchor; + TORCH_INTERNAL_ASSERT( + false, enumtype::get_enum_name(reduction), " is not valid"); + } + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_with_distance_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::TripletMarginWithDistanceLossFuncOptions` class to +/// learn what optional arguments are supported for this functional. 
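+///
+/// Unlike `triplet_margin_loss`, the distance can be any callable mapping two
+/// tensors to a distance tensor. A sketch using a cosine distance (the lambda
+/// is invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto cosine_distance = [](const torch::Tensor& x, const torch::Tensor& y) {
+///   return 1.0 - torch::cosine_similarity(x, y);
+/// };
+/// auto loss = F::triplet_margin_with_distance_loss(
+///     anchor, positive, negative,
+///     F::TripletMarginWithDistanceLossFuncOptions()
+///         .distance_function(cosine_distance)
+///         .margin(0.5));
+/// ```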
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_with_distance_loss(anchor, positive, negative, +/// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0)); +/// ``` +inline Tensor triplet_margin_with_distance_loss( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative, + const TripletMarginWithDistanceLossFuncOptions& options = {}) { + return detail::triplet_margin_with_distance_loss( + anchor, + positive, + negative, + options.distance_function(), + options.margin(), + options.swap(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor ctc_loss( + const Tensor& log_probs, + const Tensor& targets, + const Tensor& input_lengths, + const Tensor& target_lengths, + int64_t blank, + CTCLossFuncOptions::reduction_t reduction, + bool zero_infinity) { + return torch::ctc_loss( + log_probs, + targets, + input_lengths, + target_lengths, + blank, + enumtype::reduction_get_enum(reduction), + zero_infinity); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.ctc_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::CTCLossFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::ctc_loss(log_probs, targets, input_lengths, target_lengths, +/// F::CTCLossFuncOptions().reduction(torch::kNone)); +/// ``` +inline Tensor ctc_loss( + const Tensor& log_probs, + const Tensor& targets, + const Tensor& input_lengths, + const Tensor& target_lengths, + const CTCLossFuncOptions& options = {}) { + return detail::ctc_loss( + log_probs, + targets, + input_lengths, + target_lengths, + options.blank(), + options.reduction(), + options.zero_infinity()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor poisson_nll_loss( + const Tensor& input, + const Tensor& target, + bool log_input, + bool full, + double eps, + PoissonNLLLossFuncOptions::reduction_t reduction) { + return torch::poisson_nll_loss( + input, + target, + log_input, + full, + eps, + enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.poisson_nll_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::PoissonNLLLossFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
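+///
+/// With the default `log_input = true`, the per-element loss is
+/// `exp(input) - target * input`, i.e. `input` is interpreted as a log-rate.
+/// A sketch (values invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto log_rate = torch::randn({4});
+/// auto counts = torch::tensor({0.0, 1.0, 2.0, 3.0});
+/// auto loss = F::poisson_nll_loss(
+///     log_rate, counts, F::PoissonNLLLossFuncOptions().log_input(true));
+/// ```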
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::poisson_nll_loss(input, target, +/// F::PoissonNLLLossFuncOptions().reduction(torch::kNone)); +/// ``` +inline Tensor poisson_nll_loss( + const Tensor& input, + const Tensor& target, + const PoissonNLLLossFuncOptions& options = {}) { + return detail::poisson_nll_loss( + input, + target, + options.log_input(), + options.full(), + options.eps(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor margin_ranking_loss( + const Tensor& input1, + const Tensor& input2, + const Tensor& target, + double margin, + MarginRankingLossFuncOptions::reduction_t reduction) { + TORCH_CHECK( + input1.dim() == input2.dim() && input1.dim() == target.dim(), + "margin_ranking_loss : All input tensors should have same dimension but got sizes: " + "input1: ", + input1.sizes(), + ", input2: ", + input2.sizes(), + ", target: ", + target.sizes()); + return torch::margin_ranking_loss( + input1, input2, target, margin, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.margin_ranking_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::MarginRankingLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::margin_ranking_loss(input1, input2, target, +/// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum)); +/// ``` +inline Tensor margin_ranking_loss( + const Tensor& input1, + const Tensor& input2, + const Tensor& target, + const MarginRankingLossFuncOptions& options = {}) { + return detail::margin_ranking_loss( + input1, input2, target, options.margin(), options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor nll_loss( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + int64_t ignore_index, + const NLLLossFuncOptions::reduction_t reduction) { + if (input.dim() < 2) { + TORCH_CHECK(false, "Expected 2 or more dimensions (got ", input.dim(), ")"); + } + + if (input.sizes()[0] != target.sizes()[0]) { + TORCH_CHECK( + false, + "Expected input batch_size (", + input.sizes()[0], + ") to match target batch_size (", + target.sizes()[0], + ")."); + } + + return torch::nll_loss_nd( + input, + target, + weight, + enumtype::reduction_get_enum(reduction), + ignore_index); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.nll_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::NLLLossFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
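+///
+/// `nll_loss` expects log-probabilities, so it is typically fed the output of
+/// `log_softmax`; `target` holds class indices. A sketch (shapes invented for
+/// illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto logits = torch::randn({3, 5});  // batch of 3, 5 classes
+/// auto log_probs = F::log_softmax(logits, F::LogSoftmaxFuncOptions(1));
+/// auto target = torch::tensor({1, 0, 4});
+/// auto loss = F::nll_loss(log_probs, target);
+/// ```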
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::nll_loss(input, target, +/// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +inline Tensor nll_loss( + const Tensor& input, + const Tensor& target, + const NLLLossFuncOptions& options = {}) { + return detail::nll_loss( + input, + target, + options.weight(), + options.ignore_index(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor cross_entropy( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + int64_t ignore_index, + CrossEntropyFuncOptions::reduction_t reduction, + double label_smoothing) { + return torch::cross_entropy_loss( + input, + target, + weight, + enumtype::reduction_get_enum(reduction), + ignore_index, + label_smoothing); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cross_entropy +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::CrossEntropyFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cross_entropy(input, target, +/// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +inline Tensor cross_entropy( + const Tensor& input, + const Tensor& target, + const CrossEntropyFuncOptions& options = {}) { + return detail::cross_entropy( + input, + target, + options.weight(), + options.ignore_index(), + options.reduction(), + options.label_smoothing()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor binary_cross_entropy_with_logits( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + BinaryCrossEntropyWithLogitsFuncOptions::reduction_t reduction, + const Tensor& pos_weight) { + TORCH_CHECK( + target.sizes() == input.sizes(), + "Target size (", + target.sizes(), + ") must be the same as input size (", + input.sizes(), + ")"); + + return torch::binary_cross_entropy_with_logits( + input, + target, + weight, + pos_weight, + enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy_with_logits +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions` class to +/// learn what optional arguments are supported for this functional. 
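+///
+/// This fuses a sigmoid with binary cross entropy in a numerically stable
+/// way, so `input` should be raw logits rather than probabilities. A sketch
+/// (shapes invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto logits = torch::randn({4, 2});
+/// auto target = torch::rand({4, 2});  // values in [0, 1]
+/// auto loss = F::binary_cross_entropy_with_logits(
+///     logits, target,
+///     F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kMean));
+/// ```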
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy_with_logits(input, target, +/// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum)); +/// ``` +inline Tensor binary_cross_entropy_with_logits( + const Tensor& input, + const Tensor& target, + const BinaryCrossEntropyWithLogitsFuncOptions& options = {}) { + return detail::binary_cross_entropy_with_logits( + input, + target, + options.weight(), + options.reduction(), + options.pos_weight()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h new file mode 100644 index 0000000000000000000000000000000000000000..80e1a933dfb3c7e95f6087b472eda3e34ec93691 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h @@ -0,0 +1,211 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor normalize( + const Tensor& input, + double p, + int64_t dim, + double eps, + c10::optional out) { + if (out == c10::nullopt) { + auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); + return input / denom; + } else { + auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); + return torch::div_out(*out, input, denom); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.normalize +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::NormalizeFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1)); +/// ``` +inline Tensor normalize( + const Tensor& input, + NormalizeFuncOptions options = {}) { + return detail::normalize( + input, options.p(), options.dim(), options.eps(), options.out()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor layer_norm( + const Tensor& input, + const std::vector& normalized_shape, + const Tensor& weight, + const Tensor& bias, + double eps) { + return torch::layer_norm(input, normalized_shape, weight, bias, eps); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.layer_norm +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LayerNormFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
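+///
+/// `normalized_shape` must match the trailing dimensions of `input`; the mean
+/// and variance are computed over exactly those dimensions. A sketch (shapes
+/// invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto input = torch::randn({20, 5, 10});
+/// // normalize over the last dimension only
+/// auto output = F::layer_norm(input, F::LayerNormFuncOptions({10}));
+/// ```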
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5)); +/// ``` +inline Tensor layer_norm( + const Tensor& input, + const LayerNormFuncOptions& options) { + return detail::layer_norm( + input, + options.normalized_shape(), + options.weight(), + options.bias(), + options.eps()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor local_response_norm( + const Tensor& input, + int64_t size, + double alpha, + double beta, + double k) { + auto dim = input.dim(); + TORCH_CHECK( + dim >= 3, + "Expected 3D or higher dimensionality input (got ", + dim, + " dimensions)"); + auto div = input.mul(input).unsqueeze(1); + if (dim == 3) { + div = detail::pad( + div, + /*pad=*/{0, 0, size / 2, (size - 1) / 2}, + /*mode=*/torch::kConstant, + /*value=*/0); + div = detail::avg_pool2d( + div, + /*kernel_size=*/{size, 1}, + /*stride=*/1, + /*padding=*/0, + /*ceil_mode=*/false, + /*count_include_pad=*/true, + /*divisor_override=*/c10::nullopt) + .squeeze(1); + } else { + auto sizes = input.sizes(); + div = div.view({sizes[0], 1, sizes[1], sizes[2], -1}); + div = detail::pad( + div, + /*pad=*/{0, 0, 0, 0, size / 2, (size - 1) / 2}, + /*mode=*/torch::kConstant, + /*value=*/0); + div = detail::avg_pool3d( + div, + /*kernel_size=*/{size, 1, 1}, + /*stride=*/1, + /*padding=*/0, + /*ceil_mode=*/false, + /*count_include_pad=*/true, + /*divisor_override=*/c10::nullopt) + .squeeze(1); + div = div.view(sizes); + } + div = div.mul(alpha).add(k).pow(beta); + return input / div; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.local_response_norm +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::LocalResponseNormFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2)); +/// ``` +inline Tensor local_response_norm( + const Tensor& input, + const LocalResponseNormFuncOptions& options) { + return detail::local_response_norm( + input, options.size(), options.alpha(), options.beta(), options.k()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor group_norm( + const Tensor& input, + int64_t num_groups, + const Tensor& weight, + const Tensor& bias, + double eps) { + return torch::group_norm( + input, + num_groups, + weight, + bias, + eps, + at::globalContext().userEnabledCuDNN()); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.group_norm +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::GroupNormFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
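+///
+/// The channel dimension is split into `num_groups` groups and each group is
+/// normalized independently, so the channel count must be divisible by
+/// `num_groups`. A sketch (shapes invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto input = torch::randn({2, 6, 8, 8});  // 6 channels, 3 groups of 2
+/// auto output = F::group_norm(input, F::GroupNormFuncOptions(3));
+/// ```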
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5)); +/// ``` +inline Tensor group_norm( + const Tensor& input, + const GroupNormFuncOptions& options) { + return detail::group_norm( + input, + options.num_groups(), + options.weight(), + options.bias(), + options.eps()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/padding.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/padding.h new file mode 100644 index 0000000000000000000000000000000000000000..7bbf75cfa75547cbe616c307a4e0cbfead88a9c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/padding.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor pad( + const Tensor& input, + IntArrayRef pad, + PadFuncOptions::mode_t mode, + double value) { + const auto mode_enum = [&] { + if (std::holds_alternative(mode)) { + return at::padding_mode::constant; + } else if (std::holds_alternative(mode)) { + return at::padding_mode::reflect; + } else if (std::holds_alternative(mode)) { + return at::padding_mode::replicate; + } else if (std::holds_alternative(mode)) { + return at::padding_mode::circular; + } + TORCH_CHECK(false, "Unrecognised padding mode"); + }(); + + c10::optional fill_value; + if (value != 0.0) { + fill_value = value; + } + return at::_pad_enum(input, pad, static_cast(mode_enum), fill_value); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pad +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::PadFuncOptions` class to +/// learn what optional arguments are supported for this functional. 
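+///
+/// The `pad` vector is read in pairs starting from the last dimension:
+/// `{left, right, top, bottom, front, back, ...}`, so its length must be even
+/// and at most `2 * input.dim()`. A sketch (shapes invented for
+/// illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto input = torch::randn({1, 3, 8, 8});
+/// // pad the last dimension by 1 on each side: 8 x 8 -> 8 x 10
+/// auto output = F::pad(input, F::PadFuncOptions({1, 1}));
+/// ```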
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1, +/// 2}).mode(torch::kReplicate)); +/// ``` +inline Tensor pad(const Tensor& input, const PadFuncOptions& options) { + return detail::pad(input, options.pad(), options.mode(), options.value()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h new file mode 100644 index 0000000000000000000000000000000000000000..6b962cf814b105562b3d926e0a41c6a7e05658b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h @@ -0,0 +1,47 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor pixel_shuffle(const Tensor& input, int64_t upscale_factor) { + return torch::pixel_shuffle(input, upscale_factor); +} + +inline Tensor pixel_unshuffle(const Tensor& input, int64_t downscale_factor) { + return torch::pixel_unshuffle(input, downscale_factor); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pixel_shuffle +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::PixelShuffleFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2)); +/// ``` +inline Tensor pixel_shuffle( + const Tensor& input, + const PixelShuffleFuncOptions& options) { + return detail::pixel_shuffle(input, options.upscale_factor()); +} + +inline Tensor pixel_unshuffle( + const Tensor& input, + const PixelUnshuffleFuncOptions& options) { + return detail::pixel_unshuffle(input, options.downscale_factor()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h new file mode 100644 index 0000000000000000000000000000000000000000..8cfa30ca2ce9cefc4845db3e819738f730d08c91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h @@ -0,0 +1,1153 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor avg_pool1d( + const Tensor& input, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + ExpandingArray<1> padding, + bool ceil_mode, + bool count_include_pad) { + return torch::avg_pool1d( + input, kernel_size, stride, padding, ceil_mode, count_include_pad); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::AvgPool1dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
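+///
+/// The output length is `floor((L + 2 * padding - kernel_size) / stride) + 1`
+/// (`ceil` when `ceil_mode` is set). A sketch (shapes invented for
+/// illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto x = torch::randn({1, 1, 10});
+/// // (10 - 3) / 2 + 1 = 4 output positions
+/// auto y = F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
+/// ```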
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2)); +/// ``` +inline Tensor avg_pool1d( + const Tensor& input, + const AvgPool1dFuncOptions& options) { + return avg_pool1d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.ceil_mode(), + options.count_include_pad()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor avg_pool2d( + const Tensor& input, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + ExpandingArray<2> padding, + bool ceil_mode, + bool count_include_pad, + c10::optional divisor_override) { + return torch::avg_pool2d( + input, + kernel_size, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::AvgPool2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2)); +/// ``` +inline Tensor avg_pool2d( + const Tensor& input, + const AvgPool2dFuncOptions& options) { + return detail::avg_pool2d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.ceil_mode(), + options.count_include_pad(), + options.divisor_override()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor avg_pool3d( + const Tensor& input, + ExpandingArray<3> kernel_size, + ExpandingArray<3> stride, + ExpandingArray<3> padding, + bool ceil_mode, + bool count_include_pad, + c10::optional divisor_override) { + return torch::avg_pool3d( + input, + kernel_size, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::AvgPool3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2)); +/// ``` +inline Tensor avg_pool3d( + const Tensor& input, + const AvgPool3dFuncOptions& options) { + return detail::avg_pool3d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.ceil_mode(), + options.count_include_pad(), + options.divisor_override()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_pool1d( + const Tensor& input, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + ExpandingArray<1> padding, + ExpandingArray<1> dilation, + bool ceil_mode) { + return torch::max_pool1d( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool1d +/// about the exact behavior of this functional. 
+/// +/// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2)); +/// ``` +inline Tensor max_pool1d( + const Tensor& input, + const MaxPool1dFuncOptions& options) { + return detail::max_pool1d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple max_pool1d_with_indices( + const Tensor& input, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + ExpandingArray<1> padding, + ExpandingArray<1> dilation, + bool ceil_mode) { + return torch::max_pool1d_with_indices( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool1d_with_indices(x, F::MaxPool1dFuncOptions(3).stride(2)); +/// ``` +inline std::tuple max_pool1d_with_indices( + const Tensor& input, + const MaxPool1dFuncOptions& options) { + return detail::max_pool1d_with_indices( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_pool2d( + const Tensor& input, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + ExpandingArray<2> padding, + ExpandingArray<2> dilation, + bool ceil_mode) { + return torch::max_pool2d( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2)); +/// ``` +inline Tensor max_pool2d( + const Tensor& input, + const MaxPool2dFuncOptions& options) { + return detail::max_pool2d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple max_pool2d_with_indices( + const Tensor& input, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + ExpandingArray<2> padding, + ExpandingArray<2> dilation, + bool ceil_mode) { + return torch::max_pool2d_with_indices( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
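+///
+/// The second element of the returned tuple holds the indices of each
+/// maximum, in the layout expected by `max_unpool2d`. A sketch (shapes
+/// invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto x = torch::randn({1, 1, 8, 8});
+/// auto [values, indices] =
+///     F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(2));
+/// ```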
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(3).stride(2)); +/// ``` +inline std::tuple max_pool2d_with_indices( + const Tensor& input, + const MaxPool2dFuncOptions& options) { + return detail::max_pool2d_with_indices( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_pool3d( + const Tensor& input, + ExpandingArray<3> kernel_size, + ExpandingArray<3> stride, + ExpandingArray<3> padding, + ExpandingArray<3> dilation, + bool ceil_mode) { + return torch::max_pool3d( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2)); +/// ``` +inline Tensor max_pool3d( + const Tensor& input, + const MaxPool3dFuncOptions& options) { + return detail::max_pool3d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple max_pool3d_with_indices( + const Tensor& input, + ExpandingArray<3> kernel_size, + ExpandingArray<3> stride, + ExpandingArray<3> padding, + ExpandingArray<3> dilation, + bool ceil_mode) { + return torch::max_pool3d_with_indices( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool3d_with_indices(x, F::MaxPool3dFuncOptions(3).stride(2)); +/// ``` +inline std::tuple max_pool3d_with_indices( + const Tensor& input, + const MaxPool3dFuncOptions& options) { + return detail::max_pool3d_with_indices( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple adaptive_max_pool1d_with_indices( + const Tensor& input, + ExpandingArray<1> output_size) { + return torch::adaptive_max_pool1d(input, output_size); +} +} // namespace detail + +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
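+///
+/// Adaptive pooling takes the desired output size directly and derives the
+/// window layout from the input length, so the same options work for any
+/// input length. A sketch (shapes invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto x = torch::randn({1, 4, 32});
+/// auto [values, indices] = F::adaptive_max_pool1d_with_indices(
+///     x, F::AdaptiveMaxPool1dFuncOptions(8));
+/// ```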
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool1d_with_indices(x, F::AdaptiveMaxPool1dFuncOptions(3)); +/// ``` +inline std::tuple adaptive_max_pool1d_with_indices( + const Tensor& input, + const AdaptiveMaxPool1dFuncOptions& options) { + return detail::adaptive_max_pool1d_with_indices(input, options.output_size()); +} + +namespace detail { +inline Tensor adaptive_max_pool1d( + const Tensor& input, + ExpandingArray<1> output_size) { + return std::get<0>(adaptive_max_pool1d_with_indices(input, output_size)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3)); +/// ``` +inline Tensor adaptive_max_pool1d( + const Tensor& input, + const AdaptiveMaxPool1dFuncOptions& options) { + return detail::adaptive_max_pool1d(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple adaptive_max_pool2d_with_indices( + const Tensor& input, + ExpandingArrayWithOptionalElem<2> output_size) { + auto output_size_ = + torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); + return torch::adaptive_max_pool2d(input, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool2d_with_indices(x, F::AdaptiveMaxPool2dFuncOptions(3)); +/// ``` +inline std::tuple adaptive_max_pool2d_with_indices( + const Tensor& input, + const AdaptiveMaxPool2dFuncOptions& options) { + return detail::adaptive_max_pool2d_with_indices(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_max_pool2d( + const Tensor& input, + ExpandingArrayWithOptionalElem<2> output_size) { + return std::get<0>(adaptive_max_pool2d_with_indices(input, output_size)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3)); +/// ``` +inline Tensor adaptive_max_pool2d( + const Tensor& input, + const AdaptiveMaxPool2dFuncOptions& options) { + return detail::adaptive_max_pool2d(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple adaptive_max_pool3d_with_indices( + const Tensor& input, + ExpandingArrayWithOptionalElem<3> output_size) { + auto output_size_ = + torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); + return torch::adaptive_max_pool3d(input, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool3d_with_indices(x, F::AdaptiveMaxPool3dFuncOptions(3)); +/// ``` +inline std::tuple adaptive_max_pool3d_with_indices( + const Tensor& input, + const AdaptiveMaxPool3dFuncOptions& options) { + return detail::adaptive_max_pool3d_with_indices(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_max_pool3d( + const Tensor& input, + ExpandingArrayWithOptionalElem<3> output_size) { + return std::get<0>(adaptive_max_pool3d_with_indices(input, output_size)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3)); +/// ``` +inline Tensor adaptive_max_pool3d( + const Tensor& input, + const AdaptiveMaxPool3dFuncOptions& options) { + return detail::adaptive_max_pool3d(input, options.output_size()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_avg_pool1d( + const Tensor& input, + ExpandingArray<1> output_size) { + return torch::adaptive_avg_pool1d(input, output_size); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveAvgPool1dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
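+///
+/// A common use is global pooling: requesting an output size of `1` averages
+/// over the whole temporal dimension regardless of the input length. A sketch
+/// (shapes invented for illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto x = torch::randn({2, 16, 50});
+/// auto y = F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(1));
+/// // y.sizes() == {2, 16, 1}
+/// ```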
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3)); +/// ``` +inline Tensor adaptive_avg_pool1d( + const Tensor& input, + const AdaptiveAvgPool1dFuncOptions& options) { + return detail::adaptive_avg_pool1d(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_avg_pool2d( + const Tensor& input, + ExpandingArrayWithOptionalElem<2> output_size) { + auto output_size_ = + torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); + return torch::adaptive_avg_pool2d(input, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveAvgPool2dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3)); +/// ``` +inline Tensor adaptive_avg_pool2d( + const Tensor& input, + const AdaptiveAvgPool2dFuncOptions& options) { + return detail::adaptive_avg_pool2d(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_avg_pool3d( + const Tensor& input, + ExpandingArrayWithOptionalElem<3> output_size) { + auto output_size_ = + torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); + return torch::adaptive_avg_pool3d(input, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveAvgPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3)); +/// ``` +inline Tensor adaptive_avg_pool3d( + const Tensor& input, + const AdaptiveAvgPool3dFuncOptions& options) { + return detail::adaptive_avg_pool3d(input, options.output_size()); +} + +// ============================================================================ + +inline std::vector _unpool_output_size( + const Tensor& input, + const IntArrayRef& kernel_size, + const IntArrayRef& stride, + const IntArrayRef& padding, + const c10::optional>& output_size) { + auto input_size = input.sizes(); + std::vector default_size; + for (const auto d : c10::irange(kernel_size.size())) { + default_size.push_back( + (input_size[input_size.size() - kernel_size.size() + d] - 1) * + stride[d] + + kernel_size[d] - 2 * padding[d]); + } + if (!output_size) { + return default_size; + } else { + std::vector output_size_; + if (output_size->size() == kernel_size.size() + 2) { + output_size_ = IntArrayRef(*output_size).slice(2).vec(); + } + if (output_size_.size() != kernel_size.size()) { + TORCH_CHECK( + false, + "output_size should be a sequence containing ", + kernel_size.size(), + " or ", + kernel_size.size() + 2, + " elements, but it has a length of '", + output_size_.size(), + "'"); + } + for (const auto d : c10::irange(kernel_size.size())) { + const auto min_size = default_size[d] - stride[d]; + const auto max_size = default_size[d] + stride[d]; + if (!(min_size <= output_size_[d] && output_size_[d] <= max_size)) { + TORCH_CHECK( + false, + "invalid output_size ", + output_size_, + " (dim ", + d, + " must be between ", + min_size, + " and ", + max_size, + ")"); + } + } + return output_size_; + } +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_unpool1d( + const Tensor& input, + const Tensor& indices, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + ExpandingArray<1> padding, + const c10::optional>& output_size) { + auto output_size_ = + _unpool_output_size(input, kernel_size, stride, padding, output_size); + output_size_.push_back(1); + return torch::max_unpool2d( + input.unsqueeze(-1), indices.unsqueeze(-1), output_size_) + .squeeze(-1); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxUnpool1dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
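+///
+/// `max_unpool1d` is the partial inverse of `max_pool1d_with_indices`: pooled
+/// values are scattered back to the positions recorded in `indices` and all
+/// other positions are filled with zeros. A sketch (shapes invented for
+/// illustration):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto x = torch::randn({1, 1, 8});
+/// auto [pooled, indices] =
+///     F::max_pool1d_with_indices(x, F::MaxPool1dFuncOptions(2).stride(2));
+/// auto restored = F::max_unpool1d(
+///     pooled, indices, F::MaxUnpool1dFuncOptions(2).stride(2));
+/// ```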
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool1d(x, indices, +/// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1)); +/// ``` +inline Tensor max_unpool1d( + const Tensor& input, + const Tensor& indices, + const MaxUnpool1dFuncOptions& options) { + return detail::max_unpool1d( + input, + indices, + options.kernel_size(), + options.stride(), + options.padding(), + options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_unpool2d( + const Tensor& input, + const Tensor& indices, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + ExpandingArray<2> padding, + const c10::optional>& output_size) { + auto output_size_ = + _unpool_output_size(input, kernel_size, stride, padding, output_size); + + return torch::max_unpool2d(input, indices, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxUnpool2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool2d(x, indices, +/// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1)); +/// ``` +inline Tensor max_unpool2d( + const Tensor& input, + const Tensor& indices, + const MaxUnpool2dFuncOptions& options) { + return detail::max_unpool2d( + input, + indices, + options.kernel_size(), + options.stride(), + options.padding(), + options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_unpool3d( + const Tensor& input, + const Tensor& indices, + ExpandingArray<3> kernel_size, + ExpandingArray<3> stride, + ExpandingArray<3> padding, + const c10::optional>& output_size) { + auto output_size_ = + _unpool_output_size(input, kernel_size, stride, padding, output_size); + + return torch::max_unpool3d(input, indices, output_size_, stride, padding); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxUnpool3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
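> Editor's note: when the forward pooling floored away part of the input (e.g. a 5x5 input pooled to 2x2), the inverse size is ambiguous; the `output_size` option resolves this, and `_unpool_output_size` checks it stays within one stride of the default. A sketch, with illustrative shapes:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 1, 5, 5});
  auto [pooled, indices] =
      F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(2).stride(2));
  // Default unpool size would be 4x4; request the original 5x5 instead
  // (allowed, since it lies within [default - stride, default + stride]).
  auto restored = F::max_unpool2d(
      pooled,
      indices,
      F::MaxUnpool2dFuncOptions(2).stride(2).output_size(
          std::vector<int64_t>{5, 5}));
  std::cout << restored.sizes() << "\n";  // [1, 1, 5, 5]
}
```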
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
+/// ```
+inline Tensor max_unpool3d(
+    const Tensor& input,
+    const Tensor& indices,
+    const MaxUnpool3dFuncOptions& options) {
+  return detail::max_unpool3d(
+      input,
+      indices,
+      options.kernel_size(),
+      options.stride(),
+      options.padding(),
+      options.output_size());
+}
+
+// ============================================================================
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+namespace detail {
+inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
+    const Tensor& input,
+    const ExpandingArray<2>& kernel_size,
+    const c10::optional<ExpandingArray<2>>& output_size,
+    const c10::optional<ExpandingArray<2, double>>& output_ratio,
+    const Tensor& _random_samples) {
+  if (output_size == c10::nullopt && output_ratio == c10::nullopt) {
+    TORCH_CHECK(
+        false,
+        "fractional_max_pool2d requires specifying either ",
+        "an output_size or an output_ratio");
+  }
+  c10::optional<ExpandingArray<2>> output_size_ = output_size;
+  if (output_size_ == c10::nullopt) {
+    TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt);
+    output_size_ = {
+        (int64_t)(static_cast<double>(input.size(-2)) *
+                  (*output_ratio.value())[0]),
+        (int64_t)(static_cast<double>(input.size(-1)) *
+                  (*output_ratio.value())[1])};
+  }
+
+  Tensor _random_samples_ = _random_samples;
+  if (!_random_samples_.defined()) {
+    auto n_batch = input.dim() == 3 ? 1 : input.size(0);
+    _random_samples_ = torch::rand(
+        {n_batch, input.size(-3), 2},
+        torch::TensorOptions().dtype(input.dtype()).device(input.device()));
+  }
+  return torch::fractional_max_pool2d(
+      input, kernel_size, *output_size_, _random_samples_);
+}
+} // namespace detail
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+/// See the documentation for
+/// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what
+/// optional arguments are supported for this functional.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::fractional_max_pool2d_with_indices(x,
+/// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
+/// ```
+inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
+    const Tensor& input,
+    const FractionalMaxPool2dFuncOptions& options) {
+  return detail::fractional_max_pool2d_with_indices(
+      input,
+      options.kernel_size(),
+      options.output_size(),
+      options.output_ratio(),
+      options._random_samples());
+}
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+namespace detail {
+inline Tensor fractional_max_pool2d(
+    const Tensor& input,
+    ExpandingArray<2> kernel_size,
+    c10::optional<ExpandingArray<2>> output_size,
+    c10::optional<ExpandingArray<2, double>> output_ratio,
+    const Tensor& _random_samples) {
+  return std::get<0>(fractional_max_pool2d_with_indices(
+      input, kernel_size, output_size, output_ratio, _random_samples));
+}
+} // namespace detail
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+/// See the documentation for
+/// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what
+/// optional arguments are supported for this functional.
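> Editor's note: the detail function above shows that the pooling regions are randomized, drawn from `_random_samples` (shape `{N, C, 2}`, values in `[0, 1)`), and that `output_ratio` is turned into an output size via `floor(input_size * ratio)`. A sketch that fixes the samples to make the call reproducible (all shapes illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 3, 32, 32});
  // Fixing _random_samples pins down the randomized pooling regions.
  auto samples = torch::rand({1, 3, 2});
  auto y = F::fractional_max_pool2d(
      x,
      F::FractionalMaxPool2dFuncOptions(3)
          .output_ratio(0.5)
          ._random_samples(samples));
  std::cout << y.sizes() << "\n";  // [1, 3, 16, 16] = floor(32 * 0.5)
}
```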
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool2d(x, +/// F::FractionalMaxPool2dFuncOptions(3).output_size(2)); +/// ``` +inline Tensor fractional_max_pool2d( + const Tensor& input, + const FractionalMaxPool2dFuncOptions& options) { + return detail::fractional_max_pool2d( + input, + options.kernel_size(), + options.output_size(), + options.output_ratio(), + options._random_samples()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple fractional_max_pool3d_with_indices( + const Tensor& input, + const ExpandingArray<3>& kernel_size, + const c10::optional>& output_size, + const c10::optional>& output_ratio, + const Tensor& _random_samples) { + if (output_size == c10::nullopt && output_ratio == c10::nullopt) { + TORCH_CHECK( + false, + "fractional_max_pool3d requires specifying either ", + "an output_size or an output_ratio"); + } + + c10::optional> output_size_ = output_size; + if (output_size_ == c10::nullopt) { + TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt); + output_size_ = { + (int64_t)(static_cast(input.size(-3)) * + (*output_ratio.value())[0]), + (int64_t)(static_cast(input.size(-2)) * + (*output_ratio.value())[1]), + (int64_t)(static_cast(input.size(-1)) * + (*output_ratio.value())[2])}; + } + + Tensor _random_samples_ = _random_samples; + if (!_random_samples_.defined()) { + auto n_batch = input.dim() == 4 ? 1 : input.size(0); + _random_samples_ = torch::rand( + {n_batch, input.size(-4), 3}, + torch::TensorOptions().dtype(input.dtype()).device(input.device())); + } + return torch::fractional_max_pool3d( + input, kernel_size, *output_size_, _random_samples_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool3d_with_indices(x, +/// F::FractionalMaxPool3dFuncOptions(3).output_size(2)); +/// ``` +inline std::tuple fractional_max_pool3d_with_indices( + const Tensor& input, + const FractionalMaxPool3dFuncOptions& options) { + return detail::fractional_max_pool3d_with_indices( + input, + options.kernel_size(), + options.output_size(), + options.output_ratio(), + options._random_samples()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor fractional_max_pool3d( + const Tensor& input, + ExpandingArray<3> kernel_size, + c10::optional> output_size, + c10::optional> output_ratio, + const Tensor& _random_samples) { + return std::get<0>(fractional_max_pool3d_with_indices( + input, kernel_size, output_size, output_ratio, _random_samples)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
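> Editor's note: the 3D variant works the same way, with `{N, C, 3}` random samples and a depth dimension. A sketch using the `_with_indices` form, which also returns the argmax indices (illustrative shapes):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 2, 16, 16, 16});
  // Request an exact output size; indices have the same shape as the output.
  auto [y, indices] = F::fractional_max_pool3d_with_indices(
      x, F::FractionalMaxPool3dFuncOptions(2).output_size(8));
  std::cout << y.sizes() << "\n";  // [1, 2, 8, 8, 8]
}
```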
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool3d(x, +/// F::FractionalMaxPool3dFuncOptions(3).output_size(2)); +/// ``` +inline Tensor fractional_max_pool3d( + const Tensor& input, + const FractionalMaxPool3dFuncOptions& options) { + return detail::fractional_max_pool3d( + input, + options.kernel_size(), + options.output_size(), + options.output_ratio(), + options._random_samples()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor lp_pool1d( + const Tensor& input, + double norm_type, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + bool ceil_mode) { + Tensor out = detail::avg_pool1d( + input.pow(norm_type), + kernel_size, + stride, + /*padding=*/0, + ceil_mode, + /*count_include_pad=*/true); + + return (torch::sign(out) * relu(torch::abs(out))) + .mul((*kernel_size)[0]) + .pow(1. / norm_type); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LPPool1dFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2)); +/// ``` +inline Tensor lp_pool1d( + const Tensor& input, + const LPPool1dFuncOptions& options) { + return detail::lp_pool1d( + input, + options.norm_type(), + options.kernel_size(), + options.stride(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor lp_pool2d( + const Tensor& input, + double norm_type, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + bool ceil_mode) { + int kw = (*kernel_size)[0]; + int kh = (*kernel_size)[1]; + Tensor out = detail::avg_pool2d( + input.pow(norm_type), + kernel_size, + stride, + /*padding=*/0, + ceil_mode, + /*count_include_pad=*/true, + /*divisor_override=*/c10::nullopt); + + return (torch::sign(out) * relu(torch::abs(out))) + .mul(kw * kh) + .pow(1. / norm_type); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LPPool2dFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
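> Editor's note: per the `lp_pool1d` detail implementation above, LP pooling averages `x^p` over each window, multiplies by the window size, and takes the p-th root, i.e. `(sum over window of x^p)^(1/p)`. A small check with hand-verifiable numbers (norm_type 2 gives the Euclidean norm of each window):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  // Shape [1, 1, 4]; windows (3, 4) and (5, 12) under kernel 2 / stride 2.
  auto x = torch::tensor({3.0f, 4.0f, 5.0f, 12.0f}).reshape({1, 1, 4});
  // L2 pooling: sqrt(3^2 + 4^2) = 5 and sqrt(5^2 + 12^2) = 13.
  auto y = F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 2).stride(2));
  std::cout << y << "\n";  // [[[5, 13]]]
}
```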
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2)); +/// ``` +inline Tensor lp_pool2d( + const Tensor& input, + const LPPool2dFuncOptions& options) { + return detail::lp_pool2d( + input, + options.norm_type(), + options.kernel_size(), + options.stride(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor lp_pool3d( + const Tensor& input, + double norm_type, + ExpandingArray<3> kernel_size, + ExpandingArray<3> stride, + bool ceil_mode) { + int kd = (*kernel_size)[0]; + int kw = (*kernel_size)[1]; + int kh = (*kernel_size)[2]; + Tensor out = detail::avg_pool3d( + input.pow(norm_type), + kernel_size, + stride, + /*padding=*/0, + ceil_mode, + /*count_include_pad=*/true, + /*divisor_override=*/c10::nullopt); + + return (torch::sign(out) * relu(torch::abs(out))) + .mul(kd * kw * kh) + .pow(1. / norm_type); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LPPool3dFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool3d(x, F::LPPool3dFuncOptions(3, {3, 3, 5}).stride(3)); +/// ``` +inline Tensor lp_pool3d( + const Tensor& input, + const LPPool3dFuncOptions& options) { + return detail::lp_pool3d( + input, + options.norm_type(), + options.kernel_size(), + options.stride(), + options.ceil_mode()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/upsampling.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/upsampling.h new file mode 100644 index 0000000000000000000000000000000000000000..984cb72cdacb8131f29a8ac15687d3a4217ab429 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/upsampling.h @@ -0,0 +1,289 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +inline std::vector _interp_output_size( + int64_t dim, + std::tuple< + Tensor, + c10::optional>, + c10::optional>, + c10::optional> closed_over_args) { + auto [input, size, scale_factor, recompute_scale_factor] = closed_over_args; + if (size == c10::nullopt && scale_factor == c10::nullopt) { + TORCH_CHECK(false, "either size or scale_factor should be defined"); + } + if (size != c10::nullopt && scale_factor != c10::nullopt) { + TORCH_CHECK(false, "only one of size or scale_factor should be defined"); + } + if (scale_factor != c10::nullopt) { + if (static_cast(scale_factor.value().size()) != dim) { + TORCH_CHECK( + false, + "scale_factor shape must match input shape. 
", + "Input is ", + dim, + "D, scale_factor size is ", + torch::ArrayRef(*scale_factor)); + } + } + if (size != c10::nullopt) { + return *size; + } + + TORCH_INTERNAL_ASSERT(scale_factor != c10::nullopt); + auto scale_factors = *scale_factor; + + if (recompute_scale_factor == c10::nullopt) { + // only warn when the scales have floating values since + // the result for ints is the same with/without recompute_scale_factor + bool is_float_scale_factor = false; + for (double scale : scale_factors) { + is_float_scale_factor = floor(scale) != scale; + if (is_float_scale_factor) { + break; + } + } + if (is_float_scale_factor) { + TORCH_WARN( + "The default behavior for interpolate/upsample with float scale_factor changed " + "in 1.6.0 to align with other frameworks/libraries, and uses scale_factor directly, " + "instead of relying on the computed output size. " + "If you wish to keep the old behavior, please set recompute_scale_factor=True. " + "See the documentation of nn.Upsample for details. "); + } + } + + std::vector ret; + for (const auto i : c10::irange(dim)) { + ret.emplace_back(static_cast( + floor(static_cast(input.size(i + 2)) * scale_factors[i]))); + } + return ret; +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor interpolate( + const Tensor& input, + const c10::optional>& size, + const c10::optional>& scale_factor, + InterpolateFuncOptions::mode_t mode, + c10::optional align_corners, + c10::optional recompute_scale_factor, + bool antialias) { + if (std::holds_alternative(mode) || + std::get_if(&mode)) { + if (align_corners != c10::nullopt) { + TORCH_CHECK( + false, + "align_corners option can only be set with the " + "interpolating modes: linear | bilinear | bicubic | trilinear"); + } + } else { + if (align_corners == c10::nullopt) { + TORCH_WARN( + "Default upsampling behavior when mode=", + enumtype::get_enum_name(mode), + " is changed " + "to align_corners=False since 0.4.0. Please specify " + "align_corners=True if the old behavior is desired. 
" + "See the documentation of nn.Upsample for details."); + align_corners = false; + } + } + + TORCH_CHECK( + input.dim() >= 3 && input.dim() <= 5, + "Input Error: Only 3D, 4D and 5D input Tensors supported " + "(got ", + input.dim(), + "D) for the modes: nearest | linear | bilinear | bicubic | trilinear " + "(got ", + enumtype::get_enum_name(mode), + ")"); + + auto scale_factor_len = input.dim() - 2; + std::vector> scale_factor_list( + scale_factor_len, c10::nullopt); + if (scale_factor != c10::nullopt && !recompute_scale_factor.value_or(false)) { + auto _scale_factor_repeated = *scale_factor; + scale_factor_list = {}; + for (const auto& elem : _scale_factor_repeated) { + scale_factor_list.emplace_back(elem); + } + } + + if (antialias && + !(input.dim() == 4 && + (std::get_if(&mode) || + std::get_if(&mode)))) { + TORCH_CHECK( + false, + "Anti-alias option is only supported for bilinear and bicubic modes"); + } + + auto closed_over_args = + std::make_tuple(input, size, scale_factor, recompute_scale_factor); + if (input.dim() == 3 && std::get_if(&mode)) { + return torch::upsample_nearest1d( + input, + _interp_output_size(1, std::move(closed_over_args)), + scale_factor_list.at(0)); + } else if (input.dim() == 4 && std::get_if(&mode)) { + return torch::upsample_nearest2d( + input, + _interp_output_size(2, std::move(closed_over_args)), + scale_factor_list.at(0), + scale_factor_list.at(1)); + } else if (input.dim() == 5 && std::get_if(&mode)) { + return torch::upsample_nearest3d( + input, + _interp_output_size(3, std::move(closed_over_args)), + scale_factor_list.at(0), + scale_factor_list.at(1), + scale_factor_list.at(2)); + } else if (input.dim() == 3 && std::get_if(&mode)) { + return torch::_upsample_nearest_exact1d( + input, + _interp_output_size(1, std::move(closed_over_args)), + scale_factor_list.at(0)); + } else if (input.dim() == 4 && std::get_if(&mode)) { + return torch::_upsample_nearest_exact2d( + input, + _interp_output_size(2, std::move(closed_over_args)), + scale_factor_list.at(0), + scale_factor_list.at(1)); + } else if (input.dim() == 5 && std::get_if(&mode)) { + return torch::_upsample_nearest_exact3d( + input, + _interp_output_size(3, std::move(closed_over_args)), + scale_factor_list.at(0), + scale_factor_list.at(1), + scale_factor_list.at(2)); + } else if (input.dim() == 3 && std::get_if(&mode)) { + return detail::adaptive_avg_pool1d( + input, _interp_output_size(1, std::move(closed_over_args))); + } else if (input.dim() == 4 && std::get_if(&mode)) { + return detail::adaptive_avg_pool2d( + input, _interp_output_size(2, std::move(closed_over_args))); + } else if (input.dim() == 5 && std::get_if(&mode)) { + return detail::adaptive_avg_pool3d( + input, _interp_output_size(3, std::move(closed_over_args))); + } else if (input.dim() == 3 && std::get_if(&mode)) { + TORCH_CHECK( + align_corners != c10::nullopt, "align_corners should be specified."); + return torch::upsample_linear1d( + input, + _interp_output_size(1, std::move(closed_over_args)), + *align_corners, + scale_factor_list.at(0)); + } else if (input.dim() == 3 && std::get_if(&mode)) { + TORCH_CHECK(false, "Got 3D input, but bilinear mode needs 4D input"); + } else if (input.dim() == 3 && std::get_if(&mode)) { + TORCH_CHECK(false, "Got 3D input, but trilinear mode needs 5D input"); + } else if (input.dim() == 4 && std::get_if(&mode)) { + TORCH_CHECK(false, "Got 4D input, but linear mode needs 3D input"); + } else if (input.dim() == 4 && std::get_if(&mode)) { + TORCH_CHECK( + align_corners != c10::nullopt, "align_corners 
should be specified."); + if (antialias) { + return torch::_upsample_bilinear2d_aa( + input, + _interp_output_size(2, std::move(closed_over_args)), + *align_corners, + scale_factor_list.at(0), + scale_factor_list.at(1)); + } + return torch::upsample_bilinear2d( + input, + _interp_output_size(2, std::move(closed_over_args)), + *align_corners, + scale_factor_list.at(0), + scale_factor_list.at(1)); + } else if (input.dim() == 4 && std::get_if(&mode)) { + TORCH_CHECK(false, "Got 4D input, but trilinear mode needs 5D input"); + } else if (input.dim() == 5 && std::get_if(&mode)) { + TORCH_CHECK(false, "Got 5D input, but linear mode needs 3D input"); + } else if (input.dim() == 5 && std::get_if(&mode)) { + TORCH_CHECK(false, "Got 5D input, but bilinear mode needs 4D input"); + } else if (input.dim() == 5 && std::get_if(&mode)) { + TORCH_CHECK( + align_corners != c10::nullopt, "align_corners should be specified."); + return torch::upsample_trilinear3d( + input, + _interp_output_size(3, std::move(closed_over_args)), + *align_corners, + scale_factor_list.at(0), + scale_factor_list.at(1), + scale_factor_list.at(2)); + } else if (input.dim() == 4 && std::get_if(&mode)) { + TORCH_CHECK( + align_corners != c10::nullopt, "align_corners should be specified."); + if (antialias) { + return torch::_upsample_bicubic2d_aa( + input, + _interp_output_size(2, std::move(closed_over_args)), + *align_corners, + scale_factor_list.at(0), + scale_factor_list.at(1)); + } + return torch::upsample_bicubic2d( + input, + _interp_output_size(2, std::move(closed_over_args)), + *align_corners, + scale_factor_list.at(0), + scale_factor_list.at(1)); + } else { + TORCH_CHECK( + false, + "Input Error: Only 3D, 4D and 5D input Tensors supported " + "(got ", + input.dim(), + "D) for the modes: nearest | linear | bilinear | bicubic | trilinear " + "(got ", + enumtype::get_enum_name(mode), + ")"); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.interpolate +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::InterpolateFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::interpolate(input, +/// F::InterpolateFuncOptions().size({4}).mode(torch::kNearest)); +/// ``` +inline Tensor interpolate( + const Tensor& input, + const InterpolateFuncOptions& options = {}) { + return detail::interpolate( + input, + options.size(), + options.scale_factor(), + options.mode(), + options.align_corners(), + options.recompute_scale_factor(), + options.antialias()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h new file mode 100644 index 0000000000000000000000000000000000000000..4aa1d3fe0a477c5865b7d40bbd21f3b5ff7bcfec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +inline Tensor affine_grid( + const Tensor& theta, + const IntArrayRef& size, + bool align_corners = false) { + // enforce floating point dtype on theta + TORCH_CHECK( + theta.is_floating_point(), + "Expected theta to have floating point type, but got ", + theta.dtype()); + + // check that shapes and sizes match + if (size.size() == 4) { + TORCH_CHECK( + theta.dim() == 3 && theta.size(-2) == 2 && theta.size(-1) == 3, + "Expected a batch of 2D affine matrices of shape Nx2x3 for size ", + size, + ". Got ", + theta.sizes(), + "."); + } else if (size.size() == 5) { + TORCH_CHECK( + theta.dim() == 3 && theta.size(-2) == 3 && theta.size(-1) == 4, + "Expected a batch of 3D affine matrices of shape Nx3x4 for size ", + size, + ". Got ", + theta.sizes(), + "."); + } else { + TORCH_CHECK( + false, + "affine_grid only supports 4D and 5D sizes, ", + "for 2D and 3D affine transforms, respectively. ", + "Got size ", + size); + } + + if (*std::min_element(size.begin(), size.end()) <= 0) { + TORCH_CHECK(false, "Expected non-zero, positive output size. Got ", size); + } + + return torch::affine_grid_generator(theta, size, align_corners); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor grid_sample( + const Tensor& input, + const Tensor& grid, + GridSampleFuncOptions::mode_t mode, + GridSampleFuncOptions::padding_mode_t padding_mode, + c10::optional align_corners) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int64_t mode_enum, padding_mode_enum; + + if (std::holds_alternative(mode)) { + mode_enum = 0; + } else if (std::holds_alternative(mode)) { + mode_enum = 1; + } else { /// mode == 'bicubic' + mode_enum = 2; + } + + if (std::holds_alternative(padding_mode)) { + padding_mode_enum = 0; + } else if (std::holds_alternative(padding_mode)) { + padding_mode_enum = 1; + } else { /// padding_mode == 'reflection' + padding_mode_enum = 2; + } + + if (!align_corners.has_value()) { + TORCH_WARN( + "Default grid_sample and affine_grid behavior has changed ", + "to align_corners=False since 1.3.0. Please specify ", + "align_corners=True if the old behavior is desired. 
", + "See the documentation of grid_sample for details."); + align_corners = false; + } + + return torch::grid_sampler( + input, grid, mode_enum, padding_mode_enum, align_corners.value()); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.grid_sample +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::GridSampleFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::grid_sample(input, grid, +/// F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true)); +/// ``` +inline Tensor grid_sample( + const Tensor& input, + const Tensor& grid, + const GridSampleFuncOptions& options = {}) { + return detail::grid_sample( + input, + grid, + options.mode(), + options.padding_mode(), + options.align_corners()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h new file mode 100644 index 0000000000000000000000000000000000000000..d08d785f1dade72ccc4bc28a48c1d8014c257c6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { +namespace init { + +using NonlinearityType = std::variant< + enumtype::kLinear, + enumtype::kConv1D, + enumtype::kConv2D, + enumtype::kConv3D, + enumtype::kConvTranspose1D, + enumtype::kConvTranspose2D, + enumtype::kConvTranspose3D, + enumtype::kSigmoid, + enumtype::kTanh, + enumtype::kReLU, + enumtype::kLeakyReLU>; + +using FanModeType = std::variant; + +} // namespace init +} // namespace nn + +namespace nn { +namespace init { + +/// Return the recommended gain value for the given nonlinearity function. +TORCH_API double calculate_gain( + NonlinearityType nonlinearity, + double param = 0.01); + +/// Fills the given `tensor` with the provided `value` in-place, and returns it. +/// No gradient will be recorded for this operation. +TORCH_API Tensor constant_(Tensor tensor, Scalar value); + +/// Fills the given `tensor` with the Dirac delta function in-place, and returns +/// it. No gradient will be recorded for this operation. +TORCH_API Tensor dirac_(Tensor tensor); + +/// Fills the given 2-dimensional `matrix` with an identity matrix. +/// No gradient will be recorded for this operation. +TORCH_API Tensor eye_(Tensor matrix); + +/// Fills the given 2-dimensional `matrix` with values drawn from a normal +/// distribution parameterized by `mean` and `std`. +/// No gradient will be recorded for this operation. +TORCH_API Tensor normal_(Tensor tensor, double mean = 0, double std = 1); + +/// Fills the given `tensor` with ones. +/// No gradient will be recorded for this operation. +TORCH_API Tensor ones_(Tensor tensor); + +/// Fills the input `Tensor` with a (semi) orthogonal matrix, as described in +/// "Exact solutions to the nonlinear dynamics of learning in deep linear neural +/// networks" - Saxe, A. et al. (2013). The input tensor must have at least 2 +/// dimensions, and for tensors with more than 2 dimensions the trailing +/// dimensions are flattened. +/// No gradient will be recorded for this operation. 
+TORCH_API Tensor orthogonal_(Tensor tensor, double gain = 1.0); + +/// Fills the 2D input `Tensor` as a sparse matrix, where the +/// non-zero elements will be drawn from a centered normal distribution +/// with the given standard deviation `std`, as described in "Deep learning via +/// Hessian-free optimization" - Martens, J. (2010). The `sparsity` is a real +/// value between 0 and 1 that controls the fraction of elements in each column +/// to be set to zero. +/// No gradient will be recorded for this operation. +TORCH_API Tensor sparse_(Tensor tensor, double sparsity, double std = 0.01); + +/// Fills the given 2-dimensional `matrix` with values drawn from a uniform +/// distribution parameterized by `low` and `high`. +/// No gradient will be recorded for this operation. +TORCH_API Tensor uniform_(Tensor tensor, double low = 0, double high = 1); + +/// Fills the input `Tensor` with values according to the method +/// described in "Delving deep into rectifiers: Surpassing human-level +/// performance on ImageNet classification" - He, K. et al. (2015), using a +/// normal distribution. Also known as He initialization. +/// No gradient will be recorded for this operation. +TORCH_API Tensor kaiming_normal_( + Tensor tensor, + double a = 0, + FanModeType mode = torch::kFanIn, + NonlinearityType nonlinearity = torch::kLeakyReLU); + +/// Fills the input `Tensor` with values according to the method +/// described in "Delving deep into rectifiers: Surpassing human-level +/// performance on ImageNet classification" - He, K. et al. (2015), using a +/// uniform distribution. Also known as He initialization. +/// No gradient will be recorded for this operation. +TORCH_API Tensor kaiming_uniform_( + Tensor tensor, + double a = 0, + FanModeType mode = torch::kFanIn, + NonlinearityType nonlinearity = torch::kLeakyReLU); + +/// Fills the input `Tensor` with values according to the method +/// described in "Understanding the difficulty of training deep feedforward +/// neural networks" - Glorot, X. & Bengio, Y. (2010). Values are scaled by the +/// `gain` parameter. No gradient will be recorded for this operation. +TORCH_API Tensor xavier_normal_(Tensor tensor, double gain = 1.0); + +/// Fills the input `Tensor` with values according to the method +/// described in "Understanding the difficulty of training deep feedforward +/// neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform +/// distribution. Values are scaled by the `gain` parameter +/// No gradient will be recorded for this operation. +TORCH_API Tensor xavier_uniform_(Tensor tensor, double gain = 1.0); + +/// Fills the given `tensor` with zeros. +/// No gradient will be recorded for this operation. 
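> Editor's note: a short sketch of the init API declared above, wiring He (Kaiming) initialization to a layer that feeds a ReLU; `calculate_gain` exposes the nonlinearity-dependent multiplier these schemes use. The layer sizes are arbitrary:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace init = torch::nn::init;
  torch::nn::Linear linear(128, 64);
  // Fan-in mode preserves activation magnitude in the forward pass.
  init::kaiming_uniform_(linear->weight, /*a=*/0, torch::kFanIn, torch::kReLU);
  init::zeros_(linear->bias);
  // The recommended gain for ReLU is sqrt(2).
  std::cout << init::calculate_gain(torch::kReLU) << "\n";  // ~1.41421
}
```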
+TORCH_API Tensor zeros_(Tensor tensor); + +TORCH_API std::tuple _calculate_fan_in_and_fan_out( + const Tensor& tensor); + +} // namespace init +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h new file mode 100644 index 0000000000000000000000000000000000000000..de8d243533a787b1ed10ea5c90ef3286756177cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h @@ -0,0 +1,702 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// The base class for all modules in PyTorch. +/// +/// \rst +/// .. note:: +/// The design and implementation of this class is largely based on the Python +/// API. You may want to consult the python documentation for +/// :py:class:`pytorch:torch.nn.Module` for further clarification on certain +/// methods or behavior. +/// \endrst +/// +/// A `Module` is an abstraction over the implementation of some function or +/// algorithm, possibly associated with some persistent data. A `Module` may +/// contain further `Module`s ("submodules"), each with their own +/// implementation, persistent data and further submodules. `Module`s can thus +/// be said to form a recursive tree structure. A `Module` is registered as a +/// submodule to another `Module` by calling `register_module()`, typically from +/// within a parent module's constructor. +/// +/// A distinction is made between three kinds of persistent data that may be +/// associated with a `Module`: +/// +/// 1. *Parameters*: tensors that record gradients, typically weights updated +/// during the backward step (e.g. the `weight` of a `Linear` module), +/// 2. *Buffers*: tensors that do not record gradients, typically updated during +/// the forward step, such as running statistics (e.g. `mean` and `variance` +/// in the `BatchNorm` module), +/// 3. Any additional state, not necessarily tensors, required for the +/// implementation or configuration of a `Module`. +/// +/// The first two kinds of state are special in that they may be registered +/// with the `Module` system to allow convenient access and batch configuration. +/// For example, registered parameters in any `Module` may be iterated over via +/// the `parameters()` accessor. Further, changing the data type of a `Module`'s +/// registered parameters can be done conveniently via `Module::to()`, e.g. +/// `module->to(torch::kCUDA)` to move all parameters to GPU memory. Lastly, +/// registered parameters and buffers are handled specially during a `clone()` +/// operation, which performs a deepcopy of a cloneable `Module` hierarchy. +/// +/// Parameters are registered with a `Module` via `register_parameter`. Buffers +/// are registered separately via `register_buffer`. These methods are part of +/// the public API of `Module` and are typically invoked from within a +/// concrete `Module`s constructor. 
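> Editor's note: a compact sketch of the three kinds of registered state described above: a parameter, a buffer, and a submodule. `RunningLinear` is a hypothetical module invented for illustration; note the buffer is updated in place so the registered entry stays live:

```cpp
#include <torch/torch.h>
#include <iostream>

struct RunningLinearImpl : torch::nn::Module {
  RunningLinearImpl(int64_t in, int64_t out)
      : linear(register_module("linear", torch::nn::Linear(in, out))) {
    scale = register_parameter("scale", torch::ones({out}));          // grads
    running_mean = register_buffer("running_mean", torch::zeros({out}));
  }

  torch::Tensor forward(const torch::Tensor& x) {
    auto y = linear(x) * scale;
    // Update the buffer in place (no gradient recorded for it).
    running_mean.mul_(0.9).add_(y.mean(0).detach(), 0.1);
    return y;
  }

  torch::nn::Linear linear{nullptr};
  torch::Tensor scale, running_mean;
};
TORCH_MODULE(RunningLinear);

int main() {
  RunningLinear m(4, 2);
  std::cout << m->parameters().size() << "\n";  // 3: scale + weight + bias
  m->to(torch::kFloat64);  // batch-configures parameters and buffers alike
}
```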
+class TORCH_API Module : public std::enable_shared_from_this { + public: + using ModuleApplyFunction = std::function; + using ConstModuleApplyFunction = std::function; + using NamedModuleApplyFunction = + std::function; + using ConstNamedModuleApplyFunction = + std::function; + using ModulePointerApplyFunction = + std::function&)>; + using NamedModulePointerApplyFunction = + std::function&)>; + + /// Tells the base `Module` about the name of the submodule. + explicit Module(std::string name); + + /// Constructs the module without immediate knowledge of the submodule's name. + /// The name of the submodule is inferred via RTTI (if possible) the first + /// time `.name()` is invoked. + Module(); + Module(const Module&) = default; + Module& operator=(const Module&) = default; + Module(Module&&) noexcept = default; + Module& operator=(Module&&) noexcept = default; + + virtual ~Module() = default; + + /// Returns the name of the `Module`. + /// + /// A `Module` has an associated `name`, which is a string representation of + /// the kind of concrete `Module` it represents, such as `"Linear"` for the + /// `Linear` module. Under most circumstances, this name is automatically + /// inferred via runtime type information (RTTI). In the unusual circumstance + /// that you have this feature disabled, you may want to manually name your + /// `Module`s by passing the string name to the `Module` base class' + /// constructor. + const std::string& name() const noexcept; + + /// Performs a recursive deep copy of the module and all its registered + /// parameters, buffers and submodules. + /// + /// Optionally, this method sets the current device + /// to the one supplied before cloning. If no device is given, each + /// parameter and buffer will be moved to the device of its source. + /// + /// \rst + /// .. attention:: + /// Attempting to call the `clone()` method inherited from the base `Module` + /// class (the one documented here) will fail. To inherit an actual + /// implementation of `clone()`, you must subclass `Cloneable`. `Cloneable` + /// is templatized on the concrete module type, and can thus properly copy a + /// `Module`. This method is provided on the base class' API solely for an + /// easier-to-use polymorphic interface. + /// \endrst + virtual std::shared_ptr clone( + const optional& device = nullopt) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `Module&`. + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](nn::Module& module) { + /// std::cout << module.name() << std::endl; + /// }); + /// \endrst + void apply(const ModuleApplyFunction& function); + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const Module&`. + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const nn::Module& module) { + /// std::cout << module.name() << std::endl; + /// }); + /// \endrst + void apply(const ConstModuleApplyFunction& function) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::string&` for the key of the module, + /// and a `Module&`. The key of the module itself is the empty string. If + /// `name_prefix` is given, it is prepended to every key as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. 
code-block:: cpp + /// MyModule module; + /// module->apply([](const std::string& key, nn::Module& module) { + /// std::cout << key << ": " << module.name() << std::endl; + /// }); + /// \endrst + void apply( + const NamedModuleApplyFunction& function, + const std::string& name_prefix = std::string()); + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::string&` for the key of the module, + /// and a `const Module&`. The key of the module itself is the empty string. + /// If `name_prefix` is given, it is prepended to every key as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const std::string& key, const nn::Module& module) { + /// std::cout << key << ": " << module.name() << std::endl; + /// }); + /// \endrst + void apply( + const ConstNamedModuleApplyFunction& function, + const std::string& name_prefix = std::string()) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::shared_ptr&`. + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const std::shared_ptr& module) { + /// std::cout << module->name() << std::endl; + /// }); + /// \endrst + void apply(const ModulePointerApplyFunction& function) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::string&` for the key of the module, + /// and a `const std::shared_ptr&`. The key of the module itself is + /// the empty string. If `name_prefix` is given, it is prepended to every key + /// as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const std::string& key, + /// const std::shared_ptr& module) { + /// std::cout << key << ": " << module->name() << std::endl; + /// }); + /// \endrst + void apply( + const NamedModulePointerApplyFunction& function, + const std::string& name_prefix = std::string()) const; + + /// Returns the parameters of this `Module` and if `recurse` is true, also + /// recursively of every submodule. + std::vector parameters(bool recurse = true) const; + + /// Returns an `OrderedDict` with the parameters of this `Module` along with + /// their keys, and if `recurse` is true also recursively of every submodule. + OrderedDict named_parameters(bool recurse = true) const; + + /// Returns the buffers of this `Module` and if `recurse` is true, also + /// recursively of every submodule. + std::vector buffers(bool recurse = true) const; + + /// Returns an `OrderedDict` with the buffers of this `Module` along with + /// their keys, and if `recurse` is true also recursively of every submodule. + OrderedDict named_buffers(bool recurse = true) const; + + /// Returns the submodules of this `Module` (the entire submodule hierarchy) + /// and if `include_self` is true, also inserts a `shared_ptr` to this module + /// in the first position. + /// + /// \rst + /// .. warning:: + /// Only pass `include_self` as `true` if this `Module` is stored in a + /// `shared_ptr`! Otherwise an exception will be thrown. You may still call + /// this method with `include_self` set to false if your `Module` is not + /// stored in a `shared_ptr`. 
+ /// \endrst + std::vector> modules(bool include_self = true) const; + + /// Returns an `OrderedDict` of the submodules of this `Module` (the entire + /// submodule hierarchy) and their keys, and if `include_self` is true, also + /// inserts a `shared_ptr` to this module in the first position. If + /// `name_prefix` is given, it is prepended to every key as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. warning:: + /// Only pass `include_self` as `true` if this `Module` is stored in a + /// `shared_ptr`! Otherwise an exception will be thrown. You may still call + /// this method with `include_self` set to false if your `Module` is not + /// stored in a `shared_ptr`. + /// \endrst + OrderedDict> named_modules( + const std::string& name_prefix = std::string(), + bool include_self = true) const; + + /// Returns the direct submodules of this `Module`. + std::vector> children() const; + + /// Returns an `OrderedDict` of the direct submodules of this `Module` and + /// their keys. + OrderedDict> named_children() const; + + /// Enables "training" mode. + virtual void train(bool on = true); + + /// Calls train(false) to enable "eval" mode. + /// Do not override this method, override `train()` instead. + void eval(); + + /// True if the module is in training mode. + /// + /// Every `Module` has a boolean associated with it that determines whether + /// the `Module` is currently in *training* mode (set via `.train()`) or in + /// *evaluation* (inference) mode (set via `.eval()`). This property is + /// exposed via `is_training()`, and may be used by the implementation of a + /// concrete module to modify its runtime behavior. See the `BatchNorm` or + /// `Dropout` modules for examples of `Module`s that use different code paths + /// depending on this property. + virtual bool is_training() const noexcept; + + /// Recursively casts all parameters to the given `dtype` and `device`. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + virtual void to( + torch::Device device, + torch::Dtype dtype, + bool non_blocking = false); + + /// Recursively casts all parameters to the given dtype. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + virtual void to(torch::Dtype dtype, bool non_blocking = false); + + /// Recursively moves all parameters to the given device. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + virtual void to(torch::Device device, bool non_blocking = false); + + /// Recursively zeros out the `grad` value of each registered parameter. + virtual void zero_grad(bool set_to_none = true); + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. 
code-block:: cpp + /// + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module->apply(initialize_weights); + /// \endrst + template + typename ModuleType::ContainedType* as() noexcept; + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. code-block:: cpp + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module->apply(initialize_weights); + /// \endrst + template + const typename ModuleType::ContainedType* as() const noexcept; + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. code-block:: cpp + /// + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module.apply(initialize_weights); + /// \endrst + template < + typename ModuleType, + typename = torch::detail::disable_if_module_holder_t> + ModuleType* as() noexcept; + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. code-block:: cpp + /// + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module.apply(initialize_weights); + /// \endrst + template < + typename ModuleType, + typename = torch::detail::disable_if_module_holder_t> + const ModuleType* as() const noexcept; + + /// Serializes the `Module` into the given `OutputArchive`. + /// + /// If the `Module` contains unserializable submodules (e.g. + /// `nn::Functional`), those submodules are skipped when serializing. + virtual void save(serialize::OutputArchive& archive) const; + + /// Deserializes the `Module` from the given `InputArchive`. + /// + /// If the `Module` contains unserializable submodules (e.g. + /// `nn::Functional`), we don't check the existence of those submodules in the + /// `InputArchive` when deserializing. + virtual void load(serialize::InputArchive& archive); + + /// Streams a pretty representation of the `Module` into the given `stream`. + /// By default, this representation will be the name of the module (taken from + /// `name()`), followed by a recursive pretty print of all of the `Module`'s + /// submodules. + /// + /// Override this method to change the pretty print. The input + /// `stream` should be returned from the method, to allow easy chaining. + virtual void pretty_print(std::ostream& stream) const; + + /// Returns whether the `Module` is serializable. + virtual bool is_serializable() const; + + /// Registers a parameter with this `Module`. + /// + /// A parameter should be any gradient-recording tensor used in the + /// implementation of your `Module`. Registering it makes it available to + /// methods such as `parameters()`, `clone()` or `to().` + /// + /// Note that registering an undefined Tensor (e.g. 
+ /// `module.register_parameter("param", Tensor())`) is allowed, and is + /// equivalent to `module.register_parameter("param", None)` in Python API. + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// weight_ = register_parameter("weight", torch::randn({A, B})); + /// } + /// \endrst + Tensor& register_parameter( + std::string name, + Tensor tensor, + bool requires_grad = true); + + /// Registers a buffer with this `Module`. + /// + /// A buffer is intended to be state in your module that does not record + /// gradients, such as running statistics. Registering it makes it available + /// to methods such as `buffers()`, `clone()` or `to(). + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// mean_ = register_buffer("mean", torch::empty({num_features_})); + /// } + /// \endrst + Tensor& register_buffer(std::string name, Tensor tensor); + + /// Registers a submodule with this `Module`. + /// + /// Registering a module makes it available to methods such as `modules()`, + /// `clone()` or `to()`. + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// submodule_ = register_module("linear", torch::nn::Linear(3, 4)); + /// } + /// \endrst + template + std::shared_ptr register_module( + std::string name, + std::shared_ptr module); + + /// Registers a submodule with this `Module`. + /// + /// This method deals with `ModuleHolder`s. + /// + /// Registering a module makes it available to methods such as `modules()`, + /// `clone()` or `to()`. + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// submodule_ = register_module("linear", torch::nn::Linear(3, 4)); + /// } + /// \endrst + template + std::shared_ptr register_module( + std::string name, + ModuleHolder module_holder); + + /// Replaces a registered submodule with this `Module`. + /// + /// This takes care of the registration, if you used submodule members, you + /// should + // assign the submodule as well, i.e. use as + /// module->submodule_ = module->replace_module("linear", + /// torch::nn::Linear(3, 4)); + /// It only works when a module of the name is already registered. + /// + /// This is useful for replacing a module after initialization, e.g. + /// for finetuning. + template + std::shared_ptr replace_module( + const std::string& name, + std::shared_ptr module); + + /// Replaces a registered submodule with this `Module`. + /// This method deals with `ModuleHolder`s. + /// + /// This takes care of the registration, if you used submodule members, you + /// should + // assign the submodule as well, i.e. use as + /// module->submodule_ = module->replace_module("linear", linear_holder); + /// It only works when a module of the name is already registered. + /// + /// This is useful for replacing a module after initialization, e.g. + /// for finetuning. + template + std::shared_ptr replace_module( + const std::string& name, + ModuleHolder module_holder); + + /// Unregisters a submodule from this `Module`. If there is no such module + /// with `name` an exception is thrown. + void unregister_module(const std::string& name); + + protected: + /// The following three functions allow a module with default arguments in its + /// forward method to be used in a Sequential module. + /// You should NEVER override these functions manually. Instead, you should + /// use the `FORWARD_HAS_DEFAULT_ARGS` macro. 
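> Editor's note: a sketch of the `replace_module` workflow described above, swapping the head of a network for finetuning. `Backbone` and its `head` member are hypothetical stand-ins; note the member must be re-pointed alongside the registry entry, as the comments above explain:

```cpp
#include <torch/torch.h>
#include <iostream>

struct BackboneImpl : torch::nn::Module {
  BackboneImpl() {
    head = register_module("head", torch::nn::Linear(512, 1000));
  }
  torch::nn::Linear head{nullptr};
};
TORCH_MODULE(Backbone);

int main() {
  Backbone model;
  // Replace the 1000-way head with a 10-way one; update the member too.
  model->head = torch::nn::Linear(
      model->replace_module("head", torch::nn::Linear(512, 10)));
  std::cout << model->head->weight.size(0) << "\n";  // 10
}
```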
+ virtual bool _forward_has_default_args() { + return false; + } + + virtual unsigned int _forward_num_required_args() { + TORCH_CHECK( + false, + "torch::nn::Module subclass that has default arguments in `forward` method ", + "must override `_forward_num_required_args` method. Please use ", + "`FORWARD_HAS_DEFAULT_ARGS` macro to do so."); + } + + virtual std::vector _forward_populate_default_args( + std::vector&& arguments) { + TORCH_CHECK( + false, + "torch::nn::Module subclass that has default arguments in `forward` method ", + "must override `_forward_populate_default_args` method. Please use ", + "`FORWARD_HAS_DEFAULT_ARGS` macro to do so."); + } + + /// The registered parameters of this `Module`. + /// Inorder to access parameters_ in ParameterDict and ParameterList + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + OrderedDict parameters_; + + private: + // Friend classes. + + template + friend class Cloneable; + + template + friend struct AnyModuleHolder; + + /// Pretty prints the given `Module` into the `ostream`. + TORCH_API friend std::ostream& operator<<( + std::ostream& stream, + const nn::Module& module); + + // data parallel using this method to configure gradient edges during the + // replicate step. + template + friend void replicate_grad_edges( + const std::shared_ptr& module, + const std::vector>& replicas, + const std::vector& devices); + + // Private methods. + + /// Used in the implementation of `Cloneable`. + virtual void clone_(Module& other, const optional& device); + + /// The implementation of the various `to()` methods. + template + void to_impl(Ts&&... ts); + + /// Implements pretty printing the module hierarchy. + void pretty_print_recursive( + std::ostream& stream, + const std::string& indentation) const; + + /// Applies the `function` to every submodule recursively, starting at this + /// `Module`'s children (thus not including the module itself). + void apply_to_submodules( + const NamedModulePointerApplyFunction& function, + const std::string& name_prefix = std::string()) const; + + /// Returns a shared_ptr to `this` in a safe (checked) way. + std::shared_ptr shared_from_this_checked() const; + + /// The registered buffers of this `Module`. + OrderedDict buffers_; + + /// The registered (direct) submodules of this `Module`. + OrderedDict> children_; + + /// The module's name (e.g. "LSTM"). + mutable optional name_; + + /// Whether the module is in training mode. + bool is_training_{true}; +}; + +/// Serialize a `Module` pointer into an `OutputArchive`. +TORCH_API serialize::OutputArchive& operator<<( + serialize::OutputArchive& archive, + const std::shared_ptr& module); + +/// Deserializes a `Module` from an `InputArchive`. +TORCH_API serialize::InputArchive& operator>>( + serialize::InputArchive& archive, + const std::shared_ptr& module); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nn::Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template +typename ModuleType::ContainedType* Module::as() noexcept { + // Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for + // `Linear`, since `LinearImpl` inherits `nn::Module`. + return as(); +} + +template +const typename ModuleType::ContainedType* Module::as() const noexcept { + // Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for + // `Linear`, since `LinearImpl` inherits `nn::Module`. 
+ return as(); +} + +template +ModuleType* Module::as() noexcept { + return dynamic_cast(this); +} + +template +const ModuleType* Module::as() const noexcept { + return dynamic_cast(this); +} + +template +std::shared_ptr Module::register_module( + std::string name, + std::shared_ptr module) { + TORCH_CHECK(!name.empty(), "Submodule name must not be empty"); + TORCH_CHECK( + name.find('.') == std::string::npos, + "Submodule name must not contain a dot (got '", + name, + "')"); + auto& base_module = children_.insert(std::move(name), std::move(module)); + return std::dynamic_pointer_cast(base_module); +} + +template +std::shared_ptr Module::register_module( + std::string name, + ModuleHolder module_holder) { + return register_module(std::move(name), module_holder.ptr()); +} + +template +std::shared_ptr Module::replace_module( + const std::string& name, + std::shared_ptr module) { + auto& base_module = (children_[name] = std::move(module)); + return std::dynamic_pointer_cast(base_module); +} + +template +std::shared_ptr Module::replace_module( + const std::string& name, + ModuleHolder module_holder) { + return replace_module(name, module_holder.ptr()); +} + +template +void Module::to_impl(Ts&&... ts) { + // First call `to()` on every child module. + for (auto& child : children_) { + child.value()->to(ts...); + } + // Then move every parameter to the new dtype/device. + for (auto& parameter : named_parameters(/*recurse=*/false)) { + parameter->set_data(autograd::Variable(*parameter).to(ts...)); + } + // Then move every buffer to the new dtype/device. + for (auto& buffer : named_buffers(/*recurse=*/false)) { + buffer->set_data(autograd::Variable(*buffer).to(ts...)); + } +} + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h new file mode 100644 index 0000000000000000000000000000000000000000..e037d52a8535490ff5ecb17e578df5b4101ee9a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h @@ -0,0 +1,36 @@ +#pragma once + +// Common +#include + +// Containers +#include +#include +#include +#include +#include +#include +#include +#include + +// Layers +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/_functions.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..5bf1ce2dcb28537a0f37939c686450292a92c930 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/_functions.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { +namespace functions { + +class CrossMapLRN2d : public torch::autograd::Function { + public: + static torch::autograd::Variable forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& input, + const CrossMapLRN2dOptions& options); + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + torch::autograd::variable_list grad_output); +}; + +} // namespace functions +} // namespace nn +} // namespace torch diff 
--git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/activation.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/activation.h new file mode 100644 index 0000000000000000000000000000000000000000..68056ec458ebb116c957b134ceb1053b135ba85b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/activation.h @@ -0,0 +1,875 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace nn { + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies elu over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.ELU to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::ELUOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// ELU model(ELUOptions().alpha(42.42).inplace(true)); +/// ``` +class TORCH_API ELUImpl : public torch::nn::Cloneable { + public: + explicit ELUImpl(const ELUOptions& options_ = {}); + + Tensor forward(Tensor input); + + void reset() override; + + /// Pretty prints the `ELU` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + ELUOptions options; +}; + +/// A `ModuleHolder` subclass for `ELUImpl`. +/// See the documentation for `ELUImpl` class to learn what methods it +/// provides, and examples of how to use `ELU` with `torch::nn::ELUOptions`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(ELU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the selu function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.SELU to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::SELUOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// SELU model(SELUOptions().inplace(true)); +/// ``` +class TORCH_API SELUImpl : public torch::nn::Cloneable { + public: + explicit SELUImpl(const SELUOptions& options_ = {}); + + Tensor forward(Tensor input); + + void reset() override; + + /// Pretty prints the `SELU` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + SELUOptions options; +}; + +/// A `ModuleHolder` subclass for `SELUImpl`. +/// See the documentation for `SELUImpl` class to learn what methods it +/// provides, and examples of how to use `SELU` with `torch::nn::SELUOptions`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(SELU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the hard shrinkage function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Hardshrink to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::HardshrinkOptions` class to learn what +/// constructor arguments are supported for this module. 
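+///
+/// Hard shrinkage zeroes every element whose absolute value is at most
+/// `lambda` and passes larger-magnitude elements through unchanged.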
+/// +/// Example: +/// ``` +/// Hardshrink model(HardshrinkOptions().lambda(42.42)); +/// ``` +class TORCH_API HardshrinkImpl : public torch::nn::Cloneable { + public: + explicit HardshrinkImpl(const HardshrinkOptions& options_ = {}); + + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Hardshrink` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + HardshrinkOptions options; +}; + +/// A `ModuleHolder` subclass for `HardshrinkImpl`. +/// See the documentation for `HardshrinkImpl` class to learn what methods it +/// provides, and examples of how to use `Hardshrink` with +/// `torch::nn::HardshrinkOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Hardshrink); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardtanh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the HardTanh function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Hardtanh to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::HardtanhOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Hardtanh +/// model(HardtanhOptions().min_val(-42.42).max_val(0.42).inplace(true)); +/// ``` +class TORCH_API HardtanhImpl : public torch::nn::Cloneable { + public: + explicit HardtanhImpl(const HardtanhOptions& options_ = {}); + + Tensor forward(Tensor input); + + void reset() override; + + /// Pretty prints the `Hardtanh` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + HardtanhOptions options; +}; + +/// A `ModuleHolder` subclass for `HardtanhImpl`. +/// See the documentation for `HardtanhImpl` class to learn what methods it +/// provides, and examples of how to use `Hardtanh` with +/// `torch::nn::HardtanhOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Hardtanh); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LeakyReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the LeakyReLU function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.LeakyReLU to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::LeakyReLUOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// LeakyReLU model(LeakyReLUOptions().negative_slope(0.42).inplace(true)); +/// ``` +class TORCH_API LeakyReLUImpl : public torch::nn::Cloneable { + public: + explicit LeakyReLUImpl(const LeakyReLUOptions& options_ = {}); + + Tensor forward(Tensor input); + + void reset() override; + + /// Pretty prints the `LeakyReLU` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + LeakyReLUOptions options; +}; + +/// A `ModuleHolder` subclass for `LeakyReLUImpl`. +/// See the documentation for `LeakyReLUImpl` class to learn what methods it +/// provides, and examples of how to use `LeakyReLU` with +/// `torch::nn::LeakyReLUOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. 
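+///
+/// A minimal forward-pass sketch (the input shape is arbitrary and chosen
+/// purely for illustration):
+/// ```
+/// LeakyReLU model(LeakyReLUOptions().negative_slope(0.01));
+/// auto y = model(torch::randn({2, 3})); // negative entries scaled by 0.01
+/// ```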
+TORCH_MODULE(LeakyReLU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LogSigmoid ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the LogSigmoid function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.LogSigmoid to learn +/// about the exact behavior of this module. +class TORCH_API LogSigmoidImpl : public torch::nn::Cloneable { + public: + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `LogSigmoid` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `LogSigmoidImpl`. +/// See the documentation for `LogSigmoidImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(LogSigmoid); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the Softmax function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Softmax to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::SoftmaxOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Softmax model(SoftmaxOptions(1)); +/// ``` +class TORCH_API SoftmaxImpl : public torch::nn::Cloneable { + public: + explicit SoftmaxImpl(int64_t dim) : SoftmaxImpl(SoftmaxOptions(dim)) {} + explicit SoftmaxImpl(const SoftmaxOptions& options_); + + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Softmax` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + SoftmaxOptions options; +}; + +/// A `ModuleHolder` subclass for `SoftmaxImpl`. +/// See the documentation for `SoftmaxImpl` class to learn what methods it +/// provides, and examples of how to use `Softmax` with +/// `torch::nn::SoftmaxOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Softmax); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmin ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the Softmin function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Softmin to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::SoftminOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Softmin model(SoftminOptions(1)); +/// ``` +class TORCH_API SoftminImpl : public torch::nn::Cloneable { + public: + explicit SoftminImpl(int64_t dim) : SoftminImpl(SoftminOptions(dim)) {} + explicit SoftminImpl(const SoftminOptions& options_); + + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Softmin` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + SoftminOptions options; +}; + +/// A `ModuleHolder` subclass for `SoftminImpl`. +/// See the documentation for `SoftminImpl` class to learn what methods it +/// provides, and examples of how to use `Softmin` with +/// `torch::nn::SoftminOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Softmin); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LogSoftmax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the LogSoftmax function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.LogSoftmax to learn +/// about the exact behavior of this module. 
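+///
+/// Computing `log(softmax(x))` as a single fused operation is numerically
+/// more stable than applying `Softmax` followed by a pointwise `log`.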
+/// +/// See the documentation for `torch::nn::LogSoftmaxOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// LogSoftmax model(LogSoftmaxOptions(1)); +/// ``` +class TORCH_API LogSoftmaxImpl : public torch::nn::Cloneable { + public: + explicit LogSoftmaxImpl(int64_t dim) + : LogSoftmaxImpl(LogSoftmaxOptions(dim)) {} + explicit LogSoftmaxImpl(const LogSoftmaxOptions& options_); + + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `LogSoftmax` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + LogSoftmaxOptions options; +}; + +/// A `ModuleHolder` subclass for `LogSoftmaxImpl`. +/// See the documentation for `LogSoftmaxImpl` class to learn what methods it +/// provides, and examples of how to use `LogSoftmax` with +/// `torch::nn::LogSoftmaxOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(LogSoftmax); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmax2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the Softmax2d function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Softmax2d to learn +/// about the exact behavior of this module. +class TORCH_API Softmax2dImpl : public torch::nn::Cloneable { + public: + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Softmax2d` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `Softmax2dImpl`. +/// See the documentation for `Softmax2dImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(Softmax2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the PReLU function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.PReLU to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::PReLUOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// PReLU model(PReLUOptions().num_parameters(42)); +/// ``` +class TORCH_API PReLUImpl : public torch::nn::Cloneable { + public: + explicit PReLUImpl(const PReLUOptions& options_ = {}); + + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `PReLU` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + PReLUOptions options; + + /// The learned weight. + Tensor weight; +}; + +/// A `ModuleHolder` subclass for `PReLUImpl`. +/// See the documentation for `PReLUImpl` class to learn what methods it +/// provides, and examples of how to use `PReLU` with `torch::nn::PReLUOptions`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(PReLU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the ReLU function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.ReLU to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::ReLUOptions` class to learn what +/// constructor arguments are supported for this module. 
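+///
+/// With `inplace(true)` the input tensor is modified directly and returned,
+/// avoiding an extra allocation; the result then aliases the input.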
+/// +/// Example: +/// ``` +/// ReLU model(ReLUOptions().inplace(true)); +/// ``` +class TORCH_API ReLUImpl : public torch::nn::Cloneable { + public: + explicit ReLUImpl(const ReLUOptions& options_ = {}); + + Tensor forward(Tensor input); + + void reset() override; + + /// Pretty prints the `ReLU` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + ReLUOptions options; +}; + +/// A `ModuleHolder` subclass for `ReLUImpl`. +/// See the documentation for `ReLUImpl` class to learn what methods it +/// provides, and examples of how to use `ReLU` with `torch::nn::ReLUOptions`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(ReLU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReLU6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the ReLU6 function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.ReLU6 to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::ReLU6Options` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// ReLU6 model(ReLU6Options().inplace(true)); +/// ``` +class TORCH_API ReLU6Impl : public torch::nn::Cloneable { + public: + explicit ReLU6Impl(const ReLU6Options& options_ = {}); + + Tensor forward(Tensor input); + + void reset() override; + + /// Pretty prints the `ReLU6` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + ReLU6Options options; +}; + +/// A `ModuleHolder` subclass for `ReLU6Impl`. +/// See the documentation for `ReLU6Impl` class to learn what methods it +/// provides, and examples of how to use `ReLU6` with `torch::nn::ReLU6Options`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(ReLU6); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the RReLU function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.RReLU to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::RReLUOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// RReLU model(RReLUOptions().lower(0.24).upper(0.42).inplace(true)); +/// ``` +class TORCH_API RReLUImpl : public torch::nn::Cloneable { + public: + explicit RReLUImpl(const RReLUOptions& options_ = {}); + + Tensor forward(Tensor input); + + void reset() override; + + /// Pretty prints the `RReLU` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + RReLUOptions options; +}; + +/// A `ModuleHolder` subclass for `RReLUImpl`. +/// See the documentation for `RReLUImpl` class to learn what methods it +/// provides, and examples of how to use `RReLU` with `torch::nn::RReLUOptions`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(RReLU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies celu over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.CELU to learn +/// about the exact behavior of this module. 
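+///
+/// CELU computes `max(0, x) + min(0, alpha * (exp(x / alpha) - 1))`, so
+/// `alpha` controls how quickly negative inputs saturate towards `-alpha`.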
+/// +/// See the documentation for `torch::nn::CELUOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// CELU model(CELUOptions().alpha(42.42).inplace(true)); +/// ``` +class TORCH_API CELUImpl : public torch::nn::Cloneable { + public: + explicit CELUImpl(const CELUOptions& options_ = {}); + + Tensor forward(Tensor input); + + void reset() override; + + /// Pretty prints the `CELU` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + CELUOptions options; +}; + +/// A `ModuleHolder` subclass for `CELUImpl`. +/// See the documentation for `CELUImpl` class to learn what methods it +/// provides, and examples of how to use `CELU` with `torch::nn::CELUOptions`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(CELU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies glu over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.GLU to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::GLUOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// GLU model(GLUOptions(1)); +/// ``` +class TORCH_API GLUImpl : public torch::nn::Cloneable { + public: + explicit GLUImpl(const GLUOptions& options_ = {}); + + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `GLU` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + GLUOptions options; +}; + +/// A `ModuleHolder` subclass for `GLUImpl`. +/// See the documentation for `GLUImpl` class to learn what methods it +/// provides, and examples of how to use `GLU` with `torch::nn::GLUOptions`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(GLU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies gelu over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.GELU to learn +/// about the exact behavior of this module. +class TORCH_API GELUImpl : public torch::nn::Cloneable { + public: + explicit GELUImpl(GELUOptions options_ = {}); + + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `GELU` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + GELUOptions options; +}; + +/// A `ModuleHolder` subclass for `GELUImpl`. +/// See the documentation for `GELUImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(GELU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SiLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies silu over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.SiLU to learn +/// about the exact behavior of this module. +class TORCH_API SiLUImpl : public torch::nn::Cloneable { + public: + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `SiLU` module into the given `stream`. 
+ void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `SiLUImpl`. +/// See the documentation for `SiLUImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(SiLU); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Mish ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies mish over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Mish to learn +/// about the exact behavior of this module. +class TORCH_API MishImpl : public torch::nn::Cloneable { + public: + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Mish` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `MishImpl`. +/// See the documentation for `MishImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(Mish); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sigmoid ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies sigmoid over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Sigmoid to learn +/// about the exact behavior of this module. +class TORCH_API SigmoidImpl : public torch::nn::Cloneable { + public: + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Sigmoid` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `SigmoidImpl`. +/// See the documentation for `SigmoidImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(Sigmoid); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softplus ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies softplus over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Softplus to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::SoftplusOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42)); +/// ``` +class TORCH_API SoftplusImpl : public torch::nn::Cloneable { + public: + explicit SoftplusImpl(const SoftplusOptions& options_ = {}); + + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Softplus` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + SoftplusOptions options; +}; + +/// A `ModuleHolder` subclass for `SoftplusImpl`. +/// See the documentation for `SoftplusImpl` class to learn what methods it +/// provides, and examples of how to use `Softplus` with +/// `torch::nn::SoftplusOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Softplus); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the soft shrinkage function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Softshrink to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::SoftshrinkOptions` class to learn what +/// constructor arguments are supported for this module. 
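+///
+/// Soft shrinkage shifts every element towards zero by `lambda`: it computes
+/// `x - lambda` for `x > lambda`, `x + lambda` for `x < -lambda`, and `0`
+/// for elements with `|x| <= lambda`.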
+/// +/// Example: +/// ``` +/// Softshrink model(SoftshrinkOptions(42.42)); +/// ``` +class TORCH_API SoftshrinkImpl : public torch::nn::Cloneable { + public: + explicit SoftshrinkImpl(const SoftshrinkOptions& options_ = {}); + + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Softshrink` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + SoftshrinkOptions options; +}; + +/// A `ModuleHolder` subclass for `SoftshrinkImpl`. +/// See the documentation for `SoftshrinkImpl` class to learn what methods it +/// provides, and examples of how to use `Softshrink` with +/// `torch::nn::SoftshrinkOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Softshrink); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softsign ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies Softsign over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Softsign to learn +/// about the exact behavior of this module. +class TORCH_API SoftsignImpl : public torch::nn::Cloneable { + public: + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Softsign` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `SoftsignImpl`. +/// See the documentation for `SoftsignImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(Softsign); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tanh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies Tanh over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Tanh to learn +/// about the exact behavior of this module. +class TORCH_API TanhImpl : public torch::nn::Cloneable { + public: + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Tanh` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `TanhImpl`. +/// See the documentation for `TanhImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(Tanh); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tanhshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies Tanhshrink over a given input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Tanhshrink to learn +/// about the exact behavior of this module. +class TORCH_API TanhshrinkImpl : public torch::nn::Cloneable { + public: + Tensor forward(const Tensor& input); + + void reset() override; + + /// Pretty prints the `Tanhshrink` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `TanhshrinkImpl`. +/// See the documentation for `TanhshrinkImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(Tanhshrink); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Threshold ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the Threshold function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Threshold to learn +/// about the exact behavior of this module. 
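+///
+/// Elements strictly greater than the configured `threshold` pass through
+/// unchanged; all other elements are replaced by the configured `value`.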
+/// +/// See the documentation for `torch::nn::ThresholdOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Threshold model(ThresholdOptions(42.42, 24.24).inplace(true)); +/// ``` +class TORCH_API ThresholdImpl : public torch::nn::Cloneable { + public: + ThresholdImpl(double threshold, double value) + : ThresholdImpl(ThresholdOptions(threshold, value)) {} + explicit ThresholdImpl(const ThresholdOptions& options_); + + Tensor forward(Tensor input); + + void reset() override; + + /// Pretty prints the `Threshold` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + ThresholdOptions options; +}; + +/// A `ModuleHolder` subclass for `ThresholdImpl`. +/// See the documentation for `ThresholdImpl` class to learn what methods it +/// provides, and examples of how to use `Threshold` with +/// `torch::nn::ThresholdOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Threshold); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiheadAttention ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the MultiheadAttention function element-wise. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.MultiheadAttention +/// to learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MultiheadAttentionOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MultiheadAttention model(MultiheadAttentionOptions(20, 10).bias(false)); +/// ``` +class TORCH_API MultiheadAttentionImpl + : public torch::nn::Cloneable { + public: + MultiheadAttentionImpl(int64_t embed_dim, int64_t num_heads) + : MultiheadAttentionImpl( + MultiheadAttentionOptions(embed_dim, num_heads)) {} + explicit MultiheadAttentionImpl(const MultiheadAttentionOptions& options_); + + std::tuple forward( + const Tensor& query, + const Tensor& key, + const Tensor& value, + const Tensor& key_padding_mask = {}, + bool need_weights = true, + const Tensor& attn_mask = {}, + bool average_attn_weights = true); + + protected: + FORWARD_HAS_DEFAULT_ARGS( + {3, AnyValue(Tensor())}, + {4, AnyValue(true)}, + {5, AnyValue(Tensor())}, + {6, AnyValue(true)}) + + public: + void reset() override; + + void _reset_parameters(); + + /// The options with which this `Module` was constructed. + MultiheadAttentionOptions options; + + bool _qkv_same_embed_dim; + Tensor in_proj_weight; + Tensor in_proj_bias; + Tensor bias_k; + Tensor bias_v; + Linear out_proj = nullptr; + Tensor q_proj_weight; + Tensor k_proj_weight; + Tensor v_proj_weight; + int64_t head_dim; +}; + +/// A `ModuleHolder` subclass for `MultiheadAttentionImpl`. +/// See the documentation for `MultiheadAttentionImpl` class to learn what +/// methods it provides, and examples of how to use `MultiheadAttention` with +/// `torch::nn::MultiheadAttentionOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. 
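+///
+/// A minimal self-attention sketch; the dimensions and the input shape are
+/// illustrative only (the C++ API expects inputs of shape
+/// `(seq_len, batch, embed_dim)`):
+/// ```
+/// MultiheadAttention mha(MultiheadAttentionOptions(/*embed_dim=*/32,
+///                                                  /*num_heads=*/4));
+/// auto x = torch::randn({10, 8, 32});
+/// auto [attn_output, attn_weights] = mha(x, x, x);
+/// ```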
+TORCH_MODULE(MultiheadAttention);
+
+} // namespace nn
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/adaptive.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/adaptive.h
new file mode 100644
index 0000000000000000000000000000000000000000..939d57dd5d5107e2e4f6975d84c1a87cbc17e154
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/adaptive.h
@@ -0,0 +1,109 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace torch {
+namespace nn {
+
+/// The output of a single invocation of an AdaptiveLogSoftmaxWithLoss
+/// module's `forward()` method.
+struct TORCH_API ASMoutput {
+  ASMoutput(Tensor output_, double loss_);
+
+  /// Tensor containing computed target log probabilities for each example
+  Tensor output;
+
+  /// Scalar representing the computed negative log likelihood loss
+  double loss;
+};
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveLogSoftmaxWithLoss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Efficient softmax approximation as described in
+/// `Efficient softmax approximation for GPUs`_ by Edouard Grave, Armand Joulin,
+/// Moustapha Cissé, David Grangier, and Hervé Jégou.
+/// See
+/// https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveLogSoftmaxWithLoss
+/// to learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::AdaptiveLogSoftmaxWithLossOptions`
+/// class to learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10,
+/// {4, 8}).div_value(2.).head_bias(true));
+/// ```
+class TORCH_API AdaptiveLogSoftmaxWithLossImpl
+    : public Cloneable<AdaptiveLogSoftmaxWithLossImpl> {
+ public:
+  AdaptiveLogSoftmaxWithLossImpl(
+      int64_t in_features,
+      int64_t n_classes,
+      std::vector<int64_t> cutoffs)
+      : AdaptiveLogSoftmaxWithLossImpl(AdaptiveLogSoftmaxWithLossOptions(
+            in_features,
+            n_classes,
+            cutoffs)) {}
+
+  explicit AdaptiveLogSoftmaxWithLossImpl(
+      AdaptiveLogSoftmaxWithLossOptions options_);
+
+  ASMoutput forward(const Tensor& input, const Tensor& target);
+
+  void reset() override;
+
+  void reset_parameters();
+
+  /// Pretty prints the `AdaptiveLogSoftmaxWithLoss` module into the given
+  /// `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  /// Given input tensor, and output of `head`, computes the log of the full
+  /// distribution
+  Tensor _get_full_log_prob(const Tensor& input, const Tensor& head_output);
+
+  /// Computes log probabilities for all n_classes
+  Tensor log_prob(const Tensor& input);
+
+  /// This is equivalent to `log_prob(input).argmax(1)` but is more efficient
+  /// in some cases
+  Tensor predict(const Tensor& input);
+
+  /// The options with which this `Module` was constructed
+  AdaptiveLogSoftmaxWithLossOptions options;
+
+  /// Cutoffs used to assign targets to their buckets. It should be an
+  /// ordered sequence of integers sorted in increasing order
+  std::vector<int64_t> cutoffs;
+
+  int64_t shortlist_size;
+
+  /// Number of clusters
+  int64_t n_clusters;
+
+  /// Output size of head classifier
+  int64_t head_size;
+
+  Linear head = nullptr;
+
+  ModuleList tail;
+};
+
+/// A `ModuleHolder` subclass for `AdaptiveLogSoftmaxWithLossImpl`.
+/// See the documentation for `AdaptiveLogSoftmaxWithLossImpl` class to learn +/// what methods it provides, and examples of how to use +/// `AdaptiveLogSoftmaxWithLoss` with +/// `torch::nn::AdaptiveLogSoftmaxWithLossOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(AdaptiveLogSoftmaxWithLoss); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/batchnorm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/batchnorm.h new file mode 100644 index 0000000000000000000000000000000000000000..3264d90bd6ed7abf4635c09cda0a396978a41f66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/batchnorm.h @@ -0,0 +1,250 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace nn { + +/// Base class for all (dimension-specialized) batchnorm and instancenorm +/// modules. +template +class NormImplBase : public torch::nn::Cloneable { + protected: + virtual void _check_input_dim(const Tensor& input) = 0; + + public: + NormImplBase(const DerivedOptions& options_) : options(options_) { + // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall) + reset(); + } + + void reset() override { + if (options.affine()) { + weight = this->register_parameter( + "weight", torch::empty({options.num_features()})); + bias = this->register_parameter( + "bias", torch::empty({options.num_features()})); + } else { + weight = + this->register_parameter("weight", Tensor(), /*requires_grad=*/false); + bias = + this->register_parameter("bias", Tensor(), /*requires_grad=*/false); + } + if (options.track_running_stats()) { + running_mean = this->register_buffer( + "running_mean", torch::zeros({options.num_features()})); + running_var = this->register_buffer( + "running_var", torch::ones({options.num_features()})); + num_batches_tracked = this->register_buffer( + "num_batches_tracked", torch::tensor(0, torch::dtype(torch::kLong))); + } else { + running_mean = this->register_buffer("running_mean", Tensor()); + running_var = this->register_buffer("running_var", Tensor()); + num_batches_tracked = + this->register_buffer("num_batches_tracked", Tensor()); + } + reset_parameters(); + } + + void reset_running_stats() { + if (options.track_running_stats()) { + running_mean.zero_(); + running_var.fill_(1); + num_batches_tracked.zero_(); + } + } + + void reset_parameters() { + reset_running_stats(); + if (options.affine()) { + torch::nn::init::ones_(weight); + torch::nn::init::zeros_(bias); + } + } + + /// The options with which this module was constructed. + DerivedOptions options; + + /// The learned weight. + /// Only defined if the `affine` option was `true` upon construction. + Tensor weight; + + /// The learned bias. + /// Only defined if the `affine` option was `true` upon construction. + Tensor bias; + + /// The running mean. + /// Only defined if the `track_running_stats` option was `true` upon + /// construction. + Tensor running_mean; + + /// The running variance. + /// Only defined if the `track_running_stats` option was `true` upon + /// construction. + Tensor running_var; + + /// The number of the forward call. + /// Only defined if the `track_running_stats` option was `true` upon + /// construction. + Tensor num_batches_tracked; +}; + +/// Base class for all (dimension-specialized) batchnorm modules. 
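+/// In training mode the forward pass normalizes with statistics computed
+/// from the current batch (and updates `running_mean`/`running_var` when
+/// `track_running_stats` is enabled); in evaluation mode the stored running
+/// statistics are used instead.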
+template +class BatchNormImplBase : public NormImplBase { + public: + using NormImplBase::NormImplBase; + + Tensor forward(const Tensor& input) { + this->_check_input_dim(input); + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + double exponential_average_factor; + if (this->options.momentum() == c10::nullopt) { + exponential_average_factor = 0.0; + } else { + exponential_average_factor = this->options.momentum().value(); + } + + if (this->is_training() && this->options.track_running_stats()) { + if (this->num_batches_tracked.defined()) { + this->num_batches_tracked += 1; + if (this->options.momentum() == + c10::nullopt) { // use cumulative moving average + exponential_average_factor = + 1.0 / this->num_batches_tracked.template item(); + } else { // use exponential moving average + exponential_average_factor = this->options.momentum().value(); + } + } + } + + return torch::nn::functional::detail::batch_norm( + input, + this->running_mean, + this->running_var, + this->weight, + this->bias, + this->is_training() || !this->options.track_running_stats(), + /*momentum=*/exponential_average_factor, + this->options.eps()); + } + + /// Pretty prints the `BatchNorm{1,2,3}d` module into the given `stream`. + void pretty_print(std::ostream& stream) const override { + stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d(" + << this->options.num_features() << ", " + << "eps=" << this->options.eps() << ", " + << "momentum="; + + if (this->options.momentum().has_value()) { + stream << this->options.momentum().value(); + } else { + stream << "None"; + } + + stream << ", " + << "affine=" << this->options.affine() << ", " + << "track_running_stats=" << this->options.track_running_stats() + << ")"; + } +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm1d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the BatchNorm1d function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm1d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::BatchNorm1dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// BatchNorm1d +/// model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +class TORCH_API BatchNorm1dImpl : public BatchNormImplBase<1, BatchNorm1dImpl> { + protected: + void _check_input_dim(const Tensor& input) override; + + public: + using BatchNormImplBase<1, BatchNorm1dImpl>::BatchNormImplBase; +}; + +/// A `ModuleHolder` subclass for `BatchNorm1dImpl`. +/// See the documentation for `BatchNorm1dImpl` class to learn what methods it +/// provides, and examples of how to use `BatchNorm1d` with +/// `torch::nn::BatchNorm1dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(BatchNorm1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm2d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the BatchNorm2d function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm2d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::BatchNorm2dOptions` class to learn +/// what constructor arguments are supported for this module. 
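+///
+/// `BatchNorm2d` expects a 4-D input of shape `(N, C, H, W)` and normalizes
+/// each of the `C` channels; `num_features` must match `C`. A minimal sketch
+/// (shapes illustrative):
+/// ```
+/// BatchNorm2d bn(BatchNorm2dOptions(3));
+/// auto y = bn(torch::randn({8, 3, 16, 16}));
+/// ```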
+/// +/// Example: +/// ``` +/// BatchNorm2d +/// model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +class TORCH_API BatchNorm2dImpl : public BatchNormImplBase<2, BatchNorm2dImpl> { + protected: + void _check_input_dim(const Tensor& input) override; + + public: + using BatchNormImplBase<2, BatchNorm2dImpl>::BatchNormImplBase; +}; + +/// A `ModuleHolder` subclass for `BatchNorm2dImpl`. +/// See the documentation for `BatchNorm2dImpl` class to learn what methods it +/// provides, and examples of how to use `BatchNorm2d` with +/// `torch::nn::BatchNorm2dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(BatchNorm2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm3d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the BatchNorm3d function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm3d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::BatchNorm3dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// BatchNorm3d +/// model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +class TORCH_API BatchNorm3dImpl : public BatchNormImplBase<3, BatchNorm3dImpl> { + protected: + void _check_input_dim(const Tensor& input) override; + + public: + using BatchNormImplBase<3, BatchNorm3dImpl>::BatchNormImplBase; +}; + +/// A `ModuleHolder` subclass for `BatchNorm3dImpl`. +/// See the documentation for `BatchNorm3dImpl` class to learn what methods it +/// provides, and examples of how to use `BatchNorm3d` with +/// `torch::nn::BatchNorm3dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(BatchNorm3d); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/common.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/common.h new file mode 100644 index 0000000000000000000000000000000000000000..f172c82e7e632411be9ea3b8a4dc3d68ff5e74a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/common.h @@ -0,0 +1,97 @@ +#pragma once + +/// This macro enables a module with default arguments in its forward method +/// to be used in a Sequential module. +/// +/// Example usage: +/// +/// Let's say we have a module declared like this: +/// ``` +/// struct MImpl : torch::nn::Module { +/// public: +/// explicit MImpl(int value_) : value(value_) {} +/// torch::Tensor forward(int a, int b = 2, double c = 3.0) { +/// return torch::tensor(a + b + c); +/// } +/// private: +/// int value; +/// }; +/// TORCH_MODULE(M); +/// ``` +/// +/// If we try to use it in a Sequential module and run forward: +/// ``` +/// torch::nn::Sequential seq(M(1)); +/// seq->forward(1); +/// ``` +/// +/// We will receive the following error message: +/// ``` +/// MImpl's forward() method expects 3 argument(s), but received 1. +/// If MImpl's forward() method has default arguments, please make sure +/// the forward() method is declared with a corresponding +/// `FORWARD_HAS_DEFAULT_ARGS` macro. 
+/// ``` +/// +/// The right way to fix this error is to use the `FORWARD_HAS_DEFAULT_ARGS` +/// macro when declaring the module: +/// ``` +/// struct MImpl : torch::nn::Module { +/// public: +/// explicit MImpl(int value_) : value(value_) {} +/// torch::Tensor forward(int a, int b = 2, double c = 3.0) { +/// return torch::tensor(a + b + c); +/// } +/// protected: +/// /* +/// NOTE: looking at the argument list of `forward`: +/// `forward(int a, int b = 2, double c = 3.0)` +/// we saw the following default arguments: +/// ---------------------------------------------------------------- +/// 0-based index of default | Default value of arg +/// arg in forward arg list | (wrapped by `torch::nn::AnyValue()`) +/// ---------------------------------------------------------------- +/// 1 | torch::nn::AnyValue(2) +/// 2 | torch::nn::AnyValue(3.0) +/// ---------------------------------------------------------------- +/// Thus we pass the following arguments to the `FORWARD_HAS_DEFAULT_ARGS` +/// macro: +/// */ +/// FORWARD_HAS_DEFAULT_ARGS({1, torch::nn::AnyValue(2)}, {2, +/// torch::nn::AnyValue(3.0)}) +/// private: +/// int value; +/// }; +/// TORCH_MODULE(M); +/// ``` +/// Now, running the following would work: +/// ``` +/// torch::nn::Sequential seq(M(1)); +/// seq->forward(1); // This correctly populates the default arguments for +/// `MImpl::forward` +/// ``` +#define FORWARD_HAS_DEFAULT_ARGS(...) \ + template \ + friend struct torch::nn::AnyModuleHolder; \ + bool _forward_has_default_args() override { \ + return true; \ + } \ + unsigned int _forward_num_required_args() override { \ + std::pair args_info[] = {__VA_ARGS__}; \ + return args_info[0].first; \ + } \ + std::vector _forward_populate_default_args( \ + std::vector&& arguments) override { \ + std::pair args_info[] = {__VA_ARGS__}; \ + unsigned int num_all_args = std::rbegin(args_info)->first + 1; \ + TORCH_INTERNAL_ASSERT( \ + arguments.size() >= _forward_num_required_args() && \ + arguments.size() <= num_all_args); \ + std::vector ret = std::move(arguments); \ + ret.reserve(num_all_args); \ + for (auto& arg_info : args_info) { \ + if (arg_info.first > ret.size() - 1) \ + ret.emplace_back(std::move(arg_info.second)); \ + } \ + return ret; \ + } diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any.h new file mode 100644 index 0000000000000000000000000000000000000000..05983b1ea1064c5b1c025931f5f3e53a8dda7f8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any.h @@ -0,0 +1,372 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Stores a type erased `Module`. +/// +/// The PyTorch C++ API does not impose an interface on the signature of +/// `forward()` in `Module` subclasses. This gives you complete freedom to +/// design your `forward()` methods to your liking. However, this also means +/// there is no unified base type you could store in order to call `forward()` +/// polymorphically for any module. This is where the `AnyModule` comes in. +/// Instead of inheritance, it relies on type erasure for polymorphism. +/// +/// An `AnyModule` can store any `nn::Module` subclass that provides a +/// `forward()` method. 
This `forward()` may accept any types and return any
+/// type. Once stored in an `AnyModule`, you can invoke the underlying module's
+/// `forward()` by calling `AnyModule::forward()` with the arguments you would
+/// supply to the stored module (though see one important limitation below).
+/// Example:
+///
+/// \rst
+/// .. code-block:: cpp
+///
+///   struct GenericTrainer {
+///     torch::nn::AnyModule module;
+///
+///     void train(torch::Tensor input) {
+///       module.forward(input);
+///     }
+///   };
+///
+///   GenericTrainer trainer1{torch::nn::Linear(3, 4)};
+///   GenericTrainer trainer2{torch::nn::Conv2d(3, 4, 2)};
+/// \endrst
+///
+/// As `AnyModule` erases the static type of the stored module (and its
+/// `forward()` method) to achieve polymorphism, type checking of arguments is
+/// moved to runtime. That is, passing an argument with an incorrect type to an
+/// `AnyModule` will compile, but throw an exception at runtime:
+///
+/// \rst
+/// .. code-block:: cpp
+///
+///   torch::nn::AnyModule module(torch::nn::Linear(3, 4));
+///   // Linear takes a tensor as input, but we are passing an integer.
+///   // This will compile, but throw a `torch::Error` exception at runtime.
+///   module.forward(123);
+/// \endrst
+///
+/// \rst
+/// .. attention::
+///   One noteworthy limitation of `AnyModule` is that its `forward()` method
+///   does not support implicit conversion of argument types. For example, if
+///   the stored module's `forward()` method accepts a `float` and you call
+///   `any_module.forward(3.4)` (where `3.4` is a `double`), this will throw
+///   an exception.
+/// \endrst
+///
+/// The return type of the `AnyModule`'s `forward()` method is controlled via
+/// the first template argument to `AnyModule::forward()`. It defaults to
+/// `torch::Tensor`. To change it, you can write `any_module.forward<int>()`,
+/// for example.
+///
+/// \rst
+/// .. code-block:: cpp
+///
+///   torch::nn::AnyModule module(torch::nn::Linear(3, 4));
+///   auto output = module.forward(torch::ones({2, 3}));
+///
+///   struct IntModule {
+///     int forward(int x) { return x; }
+///   };
+///   torch::nn::AnyModule module(IntModule{});
+///   int output = module.forward<int>(5);
+/// \endrst
+///
+/// The only other method an `AnyModule` provides access to on the stored
+/// module is `clone()`. However, you may acquire a handle on the module via
+/// `.ptr()`, which returns a `shared_ptr<nn::Module>`. Further, if you know
+/// the concrete type of the stored module, you can get a concrete handle to it
+/// using `.get<T>()` where `T` is the concrete module type.
+///
+/// \rst
+/// .. code-block:: cpp
+///
+///   torch::nn::AnyModule module(torch::nn::Linear(3, 4));
+///   std::shared_ptr<torch::nn::Module> ptr = module.ptr();
+///   torch::nn::Linear linear(module.get<torch::nn::Linear>());
+/// \endrst
+class AnyModule {
+ public:
+  /// A default-constructed `AnyModule` is in an empty state.
+  AnyModule() = default;
+
+  /// Constructs an `AnyModule` from a `shared_ptr` to concrete module object.
+  template <typename ModuleType>
+  explicit AnyModule(std::shared_ptr<ModuleType> module);
+
+  /// Constructs an `AnyModule` from a concrete module object.
+  template <
+      typename ModuleType,
+      typename = torch::detail::enable_if_module_t<ModuleType>>
+  explicit AnyModule(ModuleType&& module);
+
+  /// Constructs an `AnyModule` from a module holder.
+  template <typename ModuleType>
+  explicit AnyModule(const ModuleHolder<ModuleType>& module_holder);
+
+  /// Move construction and assignment is allowed, and follows the default
+  /// behavior of move for `std::unique_ptr`.
+ AnyModule(AnyModule&&) = default; + AnyModule& operator=(AnyModule&&) = default; + + /// Creates a shallow copy of an `AnyModule`. + AnyModule(const AnyModule& other); + AnyModule& operator=(const AnyModule& other); + + /// Creates a deep copy of an `AnyModule` if it contains a module, else an + /// empty `AnyModule` if it is empty. + AnyModule clone(optional device = nullopt) const; + + /// Assigns a module to the `AnyModule` (to circumvent the explicit + /// constructor). + template + AnyModule& operator=(std::shared_ptr module); + + /// Invokes `forward()` on the contained module with the given arguments, and + /// returns the return value as an `AnyValue`. Use this method when chaining + /// `AnyModule`s in a loop. + template + AnyValue any_forward(ArgumentTypes&&... arguments); + + /// Invokes `forward()` on the contained module with the given arguments, and + /// casts the returned `AnyValue` to the supplied `ReturnType` (which defaults + /// to `torch::Tensor`). + template + ReturnType forward(ArgumentTypes&&... arguments); + + /// Attempts to cast the underlying module to the given module type. Throws an + /// exception if the types do not match. + template > + T& get(); + + /// Attempts to cast the underlying module to the given module type. Throws an + /// exception if the types do not match. + template > + const T& get() const; + + /// Returns the contained module in a `nn::ModuleHolder` subclass if possible + /// (i.e. if `T` has a constructor for the underlying module type). + template + T get() const; + + /// Returns a `std::shared_ptr` whose dynamic type is that of the underlying + /// module. + std::shared_ptr ptr() const; + + /// Like `ptr()`, but casts the pointer to the given type. + template > + std::shared_ptr ptr() const; + + /// Returns the `type_info` object of the contained value. + const std::type_info& type_info() const; + + /// Returns true if the `AnyModule` does not contain a module. + bool is_empty() const noexcept; + + private: + /// Creates a `unique_ptr` pointing to a + /// `AnyModuleHolder` of the correct type. This method is used to deduce the + /// arguments of the module's `forward()` method. + template < + typename ModuleType, + typename Class, + typename ReturnType, + typename... ArgumentTypes> + std::unique_ptr make_holder( + std::shared_ptr&& module, + ReturnType (Class::*)(ArgumentTypes...)); + + /// Helper method invoked by const and non-const `get()`. + template + ModuleType& get_(ReturnType (ModuleType::*)(ArgumentTypes...)) const; + + /// Helper method invoked by const and non-const `get()`. + template + ModuleType& get_() const; + + /// The type erased module. + std::unique_ptr content_; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModule ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template +AnyModule::AnyModule(std::shared_ptr module) + : content_(make_holder( + std::move(module), + &std::remove_reference::type::forward)) { + // `AnyModule` can only store an `nn::Module` subclass object that provides + // a `forward()` method that has a non-templatized return type. + // (e.g. `AnyModule` cannot store `nn::Sequential`, because `nn::Sequential`'s + // `forward()` method has a templatized return type.) + static_assert( + torch::detail::is_module::value, + "Can only store object derived from nn::Module into AnyModule"); + static_assert( + torch::detail::has_forward::value, + "Can only store module with a forward() method that has a non-templatized" + " argument type and return type into AnyModule (e.g. 
we cannot store nn::Sequential" + "into AnyModule, because its forward() method's argument type and return type are templatized." + " If you need to use nn::Sequentials inside each other you can subclass " + "nn::Sequential and write a non-templatized forward function for it. You can checkout " + "https://github.com/pytorch/vision/blob/2f46070f3cb1ea894d82578f3dc5677f82f34958/torchvision/csrc/models/mnasnet.cpp#L59 " + "for an example on how to do this.)."); +} + +template +AnyModule::AnyModule(ModuleType&& module) + : AnyModule( + std::make_shared(std::forward(module))) {} + +template +AnyModule::AnyModule(const ModuleHolder& module_holder) + : AnyModule(module_holder.ptr()) {} + +inline AnyModule::AnyModule(const AnyModule& other) + : content_(other.content_ ? other.content_->copy() : nullptr) {} + +inline AnyModule& AnyModule::operator=(const AnyModule& other) { + if (this != &other) { + content_ = other.content_ ? other.content_->copy() : nullptr; + } + return *this; +} + +inline AnyModule AnyModule::clone(optional device) const { + AnyModule clone; + clone.content_ = content_ ? content_->clone_module(device) : nullptr; + return clone; +} + +template +AnyModule& AnyModule::operator=(std::shared_ptr module) { + // NOLINTNEXTLINE(cppcoreguidelines-c-copy-assignment-signature) + return (*this = AnyModule(std::move(module))); +} + +template +AnyValue AnyModule::any_forward(ArgumentTypes&&... arguments) { + TORCH_CHECK(!is_empty(), "Cannot call forward() on an empty AnyModule"); + std::vector values; + values.reserve(sizeof...(ArgumentTypes)); + torch::apply( + [&values](AnyValue&& value) { values.push_back(std::move(value)); }, + AnyValue(std::forward(arguments))...); + return content_->forward(std::move(values)); +} + +template +ReturnType AnyModule::forward(ArgumentTypes&&... arguments) { + return any_forward(std::forward(arguments)...) + .template get(); +} + +template +T& AnyModule::get() { + TORCH_CHECK(!is_empty(), "Cannot call get() on an empty AnyModule"); + return get_(); +} + +template +const T& AnyModule::get() const { + TORCH_CHECK(!is_empty(), "Cannot call get() on an empty AnyModule"); + return get_(); +} + +template +T AnyModule::get() const { + return T(ptr()); +} + +inline std::shared_ptr AnyModule::ptr() const { + TORCH_CHECK(!is_empty(), "Cannot call ptr() on an empty AnyModule"); + return content_->ptr(); +} + +template +std::shared_ptr AnyModule::ptr() const { + TORCH_CHECK(!is_empty(), "Cannot call ptr() on an empty AnyModule"); + // Call get() but discard the value, just to do the type checking. + get_(); + return std::dynamic_pointer_cast(ptr()); +} + +inline const std::type_info& AnyModule::type_info() const { + TORCH_CHECK(!is_empty(), "Cannot call type_info() on an empty AnyModule"); + return content_->type_info; +} + +inline bool AnyModule::is_empty() const noexcept { + return content_ == nullptr; +} + +// Private Methods + +template < + typename ModuleType, + typename Class, + typename ReturnType, + typename... ArgumentTypes> +std::unique_ptr AnyModule::make_holder( + std::shared_ptr&& module, + ReturnType (Class::*)(ArgumentTypes...)) { + static_assert( + torch::detail::check_not_lvalue_references(), + "Modules stored inside AnyModule must not take references. 
" + "Use pointers instead."); + static_assert( + !std::is_void::value, + "AnyModule cannot store modules that return void " + "(you can return a dummy value)."); + return std::make_unique< + AnyModuleHolder, ArgumentTypes...>>( + std::move(module)); +} + +template +ModuleType& AnyModule::get_() const { + using M = typename std::remove_reference::type; + static_assert( + torch::detail::has_forward::value, + "Can only call AnyModule::get with a type T that has a forward method"); + return get_(&M::forward); +} + +template +ModuleType& AnyModule::get_( + ReturnType (ModuleType::*)(ArgumentTypes...)) const { + if (typeid(ModuleType).hash_code() == type_info().hash_code()) { + return *static_cast&>( + *content_) + .module; + } + AT_ERROR( + "Attempted to cast module of type ", + c10::demangle(type_info().name()), + " to type ", + c10::demangle(typeid(ModuleType).name())); +} + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_value.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_value.h new file mode 100644 index 0000000000000000000000000000000000000000..3e6c23ef977ca88237b2999a43eaa08309a43c29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_value.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyValue ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// An implementation of `std::any` which stores +/// a type erased object, whose concrete value can be retrieved at runtime by +/// checking if the `typeid()` of a requested type matches the `typeid()` of +/// the object stored. +class AnyValue { + public: + /// Move construction and assignment is allowed, and follows the default + /// behavior of move for `std::unique_ptr`. + AnyValue(AnyValue&&) = default; + AnyValue& operator=(AnyValue&&) = default; + + /// Copy construction and assignment is allowed. + AnyValue(const AnyValue& other) : content_(other.content_->clone()) {} + AnyValue& operator=(const AnyValue& other) { + content_ = other.content_->clone(); + return *this; + } + + /// Constructs the `AnyValue` from value type. + template + // NOLINTNEXTLINE(bugprone-forwarding-reference-overload) + explicit AnyValue(T&& value) + : content_(std::make_unique>>(std::forward(value))) { + } + + /// Returns a pointer to the value contained in the `AnyValue` if the type + /// passed as template parameter matches the type of the value stored, and + /// returns a null pointer otherwise. + template + T* try_get() { + static_assert( + !std::is_reference::value, + "AnyValue stores decayed types, you cannot cast it to a reference type"); + static_assert( + !std::is_array::value, + "AnyValue stores decayed types, you must cast it to T* instead of T[]"); + if (typeid(T).hash_code() == type_info().hash_code()) { + return &static_cast&>(*content_).value; + } + return nullptr; + } + + /// Returns the value contained in the `AnyValue` if the type passed as + /// template parameter matches the type of the value stored, and throws an + /// exception otherwise. 
+ template + T get() { + if (auto* maybe_value = try_get()) { + return *maybe_value; + } + AT_ERROR( + "Attempted to cast AnyValue to ", + c10::demangle(typeid(T).name()), + ", but its actual type is ", + c10::demangle(type_info().name())); + } + + /// Returns the `type_info` object of the contained value. + const std::type_info& type_info() const noexcept { + return content_->type_info; + } + + private: + friend struct AnyModulePlaceholder; + friend struct TestAnyValue; + + /// \internal + /// The static type of the object we store in the `AnyValue`, which erases the + /// actual object's type, allowing us only to check the `type_info` of the + /// type stored in the dynamic type. + struct Placeholder { + explicit Placeholder(const std::type_info& type_info_) noexcept + : type_info(type_info_) {} + Placeholder(const Placeholder&) = default; + Placeholder(Placeholder&&) = default; + virtual ~Placeholder() = default; + virtual std::unique_ptr clone() const { + TORCH_CHECK(false, "clone() should only be called on `AnyValue::Holder`"); + } + const std::type_info& type_info; + }; + + /// \internal + /// The dynamic type of the object we store in the `AnyValue`, which hides the + /// actual object we have erased in this `AnyValue`. + template + struct Holder : public Placeholder { + /// A template because T&& would not be universal reference here. + template + // NOLINTNEXTLINE(bugprone-forwarding-reference-overload) + explicit Holder(U&& value_) noexcept + : Placeholder(typeid(T)), value(std::forward(value_)) {} + std::unique_ptr clone() const override { + return std::make_unique>(value); + } + T value; + }; + + /// The type erased object. + std::unique_ptr content_; +}; + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/functional.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..dbd2b0aaebdcce8d2ff39e1fa77576008fbb9bba --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/functional.h @@ -0,0 +1,105 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { + +/// Wraps a function in a `Module`. +/// +/// The `Functional` module allows wrapping an arbitrary function or function +/// object in an `nn::Module`. This is primarily handy for usage in +/// `Sequential`. +/// +/// \rst +/// .. code-block:: cpp +/// +/// Sequential sequential( +/// Linear(3, 4), +/// Functional(torch::relu), +/// BatchNorm1d(3), +/// Functional(torch::elu, /*alpha=*/1)); +/// \endrst +/// +/// While a `Functional` module only accepts a single `Tensor` as input, it is +/// possible for the wrapped function to accept further arguments. However, +/// these have to be bound *at construction time*. For example, if +/// you want to wrap `torch::leaky_relu`, which accepts a `slope` scalar as its +/// second argument, with a particular value for its `slope` in a `Functional` +/// module, you could write +/// +/// \rst +/// .. code-block:: cpp +/// +/// Functional(torch::leaky_relu, /*slope=*/0.5) +/// \endrst +/// +/// The value of `0.5` is then stored within the `Functional` object and +/// supplied to the function call at invocation time. Note that such bound +/// values are evaluated eagerly and stored a single time. 
See the documentation +/// of [std::bind](https://en.cppreference.com/w/cpp/utility/functional/bind) +/// for more information on the semantics of argument binding. +/// +/// \rst +/// .. attention:: +/// After passing any bound arguments, the function must accept a single +/// tensor and return a single tensor. +/// \endrst +/// +/// Note that `Functional` overloads the call operator (`operator()`) such that +/// you can invoke it with `my_func(...)`. +class TORCH_API FunctionalImpl : public torch::nn::Cloneable { + public: + using Function = std::function; + + /// Constructs a `Functional` from a function object. + explicit FunctionalImpl(Function function); + + template < + typename SomeFunction, + typename... Args, + typename = torch::enable_if_t<(sizeof...(Args) > 0)>> + explicit FunctionalImpl(SomeFunction original_function, Args&&... args) + // NOLINTNEXTLINE(modernize-avoid-bind) + : function_(std::bind( + original_function, + /*input=*/std::placeholders::_1, + std::forward(args)...)) { + // std::bind is normally evil, but (1) gcc is broken w.r.t. handling + // parameter pack expansion in lambdas and (2) moving parameter packs into + // a lambda only works with C++14, so std::bind is the more move-aware + // solution here. + } + + void reset() override; + + /// Pretty prints the `Functional` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// Forwards the `input` tensor to the underlying (bound) function object. + Tensor forward(Tensor input); + + /// Calls forward(input). + Tensor operator()(Tensor input); + + bool is_serializable() const override; + + private: + Function function_; +}; + +/// A `ModuleHolder` subclass for `FunctionalImpl`. +/// See the documentation for `FunctionalImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(Functional); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/moduledict.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/moduledict.h new file mode 100644 index 0000000000000000000000000000000000000000..1f7fffa5919fd471414534971e84e683301b98de --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/moduledict.h @@ -0,0 +1,262 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// An OrderedDict of `Module`s that registers its elements by their `key`s. +/// +/// \rst +/// .. code-block:: cpp +/// +/// torch::OrderedDict> ordereddict = { +/// {"linear", Linear(10, 3).ptr()}, +/// {"conv", Conv2d(1, 2, 3).ptr()}, +/// {"dropout", Dropout(0.5).ptr()}, +/// }; +/// torch::nn::ModuleDict dict1(ordereddict); +/// +/// for (const auto &module : *dict1) { +/// module->pretty_print(std::cout); +/// } +/// +/// std::vector>> list = { +/// {"linear", Linear(10, 3).ptr()}, +/// {"conv", Conv2d(1, 2, 3).ptr()}, +/// {"dropout", Dropout(0.5).ptr()}, +/// }; +/// torch::nn::ModuleDict dict2(list); +/// +/// for (const auto &module : *dict2) { +/// module->pretty_print(std::cout); +/// } +/// +/// \endrst +/// +/// Why should you use `ModuleDict` instead of a simple `map` or `OrderedDict`? 
+/// The value a `ModuleDict` provides over manually calling an ordered map of +/// modules is that it allows treating the whole container *as a single module*, +/// such that performing a transformation on the `ModuleDict` applies to each of +/// the modules it stores (which are each a registered submodule of the +/// `ModuleDict`). For example, calling `.to(torch::kCUDA)` on a `ModuleDict` +/// will move each module in the map to CUDA memory. For example: +/// +/// \rst +/// .. code-block:: cpp +/// +/// torch::OrderedDict> ordereddict = { +/// {"linear", Linear(10, 3).ptr()}, +/// {"conv", Conv2d(1, 2, 3).ptr()}, +/// {"dropout", Dropout(0.5).ptr()}, +/// }; +/// torch::nn::ModuleDict dict(ordereddict); +/// +/// // Convert all modules to CUDA. +/// dict->to(torch::kCUDA); +/// +/// \endrst +/// +/// Finally, `ModuleDict` provides a lightweight container API, such as allowing +/// iteration over submodules, positional access, adding new modules from a +/// vector of key-module pairs or an `OrderedDict` or another `ModuleDict` after +/// construction via `update`. +class ModuleDictImpl : public Cloneable { + public: + using Iterator = + torch::OrderedDict>::Iterator; + using ConstIterator = + torch::OrderedDict>::ConstIterator; + + ModuleDictImpl() = default; + + /// Constructs the `ModuleDict` from a list of string-Module pairs. + explicit ModuleDictImpl( + const std::vector>>& + modules) { + update(modules); + } + + /// Constructs the `ModuleDict` from an `OrderedDict`. + explicit ModuleDictImpl( + const torch::OrderedDict>& modules) { + update(modules); + } + + /// Return the items in the `ModuleDict`. + std::vector>> items() const { + return modules_.pairs(); + } + + /// Return the keys in the `ModuleDict`. + std::vector keys() const { + return modules_.keys(); + } + + /// Return the values in the `ModuleDict`. + std::vector> values() const { + return modules_.values(); + } + + /// Return an iterator to the start of `ModuleDict`. + Iterator begin() { + return modules_.begin(); + } + + /// Return a const iterator to the start of `ModuleDict`. + ConstIterator begin() const { + return modules_.begin(); + } + + /// Return an iterator to the end of `ModuleDict`. + Iterator end() { + return modules_.end(); + } + + /// Return a const iterator to the end of `ModuleDict`. + ConstIterator end() const { + return modules_.end(); + } + + /// Return the number of items currently stored in the `ModuleDict`. + size_t size() const noexcept { + return modules_.size(); + } + + /// Return true if the `ModuleDict` is empty, otherwise return false. + bool empty() const noexcept { + return modules_.is_empty(); + } + + /// Check if the centain parameter with the key in the `ModuleDict`. + bool contains(const std::string& key) const noexcept { + return modules_.contains(key); + } + + /// Remove all items from the `ModuleDict`. + void clear() { + // Not remove the registration of modules to make it consistent with python + // version. + modules_.clear(); + } + + /// Special cloning function for `ModuleDict` because it does not use + /// `reset()`. + std::shared_ptr clone( + const optional& device = nullopt) const override { + auto clone = std::make_shared(); + for (const auto& module : modules_) { + clone->insert(module.key(), module.value()->clone(device)); + } + return clone; + } + + /// `reset()` is empty for `ModuleDict`, since it does not have parameters of + /// its own. + void reset() override {} + + /// Pretty prints the `ModuleDict` into the given `stream`. 
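+  /// A hedged sketch (editorial illustration):
+  /// \rst
+  /// .. code-block:: cpp
+  ///
+  ///   dict->pretty_print(std::cout); // prints "torch::nn::ModuleDict"
+  /// \endrst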
+ void pretty_print(std::ostream& stream) const override { + stream << "torch::nn::ModuleDict"; + } + + /// Attempts to returns the `Module` associated with the given `key`. Throws + /// an exception if no such `key` is stored in the `ModuleDict`. Check + /// contains(key) before for a non-throwing way of access. + std::shared_ptr operator[](const std::string& key) const { + return modules_[key]; + } + + /// Attempts to return the module at the given key as the requested type. + /// Throws an exception if no such `key` is stored in the `ModuleDict`. + /// Check contains(key) before for a non-throwing way of access. + template + T& at(const std::string& key) { + static_assert( + torch::detail::is_module::value, + "Can only call ModuleList::at with an nn::Module type"); + auto module = modules_[key]->as(); + TORCH_CHECK( + module, + "Unable to cast module[", + key, + "] to ", + c10::demangle(typeid(T).name())); + return *module; + } + + /// Attempts to return the module at the given key as the requested type. + /// Throws an exception if no such `key` is stored in the `ModuleDict`. + /// Check contains(key) before for a non-throwing way of access. + template + const T& at(const std::string& key) const { + static_assert( + torch::detail::is_module::value, + "Can only call ModuleList::at with an nn::Module type"); + const auto module = modules_[key]->as(); + TORCH_CHECK( + module, + "Unable to cast module[", + key, + "] to ", + c10::demangle(typeid(T).name())); + return *module; + } + + /// Removes and returns the `Module` associated with the given `key`. + /// Throws an exception if no such `key` is stored in the `ModuleDict`. + /// Check contains(key) before for a non-throwing way of access. + std::shared_ptr pop(const std::string& key) { + auto module = modules_[key]; + modules_.erase(key); + // Not remove the registration of the module to make it consistent with + // python version. + return module; + } + + /// Updated the `ModuleDict` with a vector of key-module pairs. + void update( + const std::vector>>& + modules) { + for (auto& item : modules) { + insert(item.first, item.second); + } + } + + /// Updated the `ModuleDict` with key-value pairs from `OrderedDict` or + /// `ModuleDict`. + template + void update(const Container& container) { + for (auto& item : container) { + insert(item.key(), item.value()); + } + } + + private: + /// Private `OrderedDict` holding the key-Module pairs. + torch::OrderedDict> modules_; + + /// Insert a key-module pair by overwriting existing keys, + /// and register or replace the `Module`. + void insert(const std::string& key, std::shared_ptr module) { + if (contains(key)) { + modules_[key] = std::move(module); + replace_module(key, modules_[key]); + } else { + modules_.insert(key, std::move(module)); + register_module(key, modules_.back().value()); + } + } +}; + +/// A `ModuleHolder` subclass for `ModuleDictImpl`. +/// See the documentation for `ModuleDictImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. 
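+///
+/// A hedged usage sketch (an editorial illustration, using only members
+/// declared above):
+/// \rst
+/// .. code-block:: cpp
+///
+///   std::vector<std::pair<std::string, std::shared_ptr<Module>>> list = {
+///       {"linear", Linear(10, 3).ptr()},
+///       {"dropout", Dropout(0.5).ptr()},
+///   };
+///   torch::nn::ModuleDict dict(list);
+///
+///   // Typed access; throws unless the stored module is a `LinearImpl`.
+///   auto& linear = dict->at<torch::nn::LinearImpl>("linear");
+///
+///   // Removes and returns the entry for "dropout".
+///   auto removed = dict->pop("dropout");
+/// \endrst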
+TORCH_MODULE(ModuleDict); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/modulelist.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/modulelist.h new file mode 100644 index 0000000000000000000000000000000000000000..72a76163ac0344d3a81737e185160c9007b2f70d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/modulelist.h @@ -0,0 +1,274 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { + +/// A list of `Module`s that registers its elements. +/// +/// \rst +/// .. code-block:: cpp +/// +/// torch::nn::ModuleList mlist( +/// torch::nn::Linear(3, 4), +/// torch::nn::BatchNorm1d(4), +/// torch::nn::Dropout(0.5) +/// ); +/// +/// for (const auto &module : *mlist) { +/// module->pretty_print(std::cout); +/// } +/// +/// \endrst +/// +/// Why should you use `ModuleList` instead of a simple `std::vector`? The value +/// a `ModuleList` provides over manually calling a sequence of modules is that +/// it allows treating the whole container *as a single module*, such that +/// performing a transformation on the `ModuleList` applies to each of the +/// modules it stores (which are each a registered submodule of the +/// `ModuleList`). For example, calling +/// `.to(torch::kCUDA)` on a `ModuleList` will move each module in the list to +/// CUDA memory. For example: +/// +/// \rst +/// .. code-block:: cpp +/// +/// torch::nn::ModuleList mlist( +/// torch::nn::Linear(3, 4), +/// torch::nn::BatchNorm1d(4), +/// torch::nn::Dropout(0.5) +/// ); +/// +/// // Convert all modules to CUDA. +/// mlist->to(torch::kCUDA); +/// +/// \endrst +/// +/// Finally, `ModuleList` provides a lightweight container API, such as allowing +/// iteration over submodules, positional access, adding a new module after +/// construction via `push_back`, as well as joining two `ModuleList`s via +/// `extend`. +class ModuleListImpl : public Cloneable { + public: + using Iterator = std::vector>::iterator; + using ConstIterator = std::vector>::const_iterator; + + ModuleListImpl() = default; + + /// Constructs the `ModuleList` from a variadic list of modules. + template + explicit ModuleListImpl(Modules&&... modules) { + modules_.reserve(sizeof...(Modules)); + push_back_var(std::forward(modules)...); + } + + /// Special cloning function for `ModuleList` because it does not use + /// `reset()`. + std::shared_ptr clone( + const optional& device = nullopt) const override { + auto clone = std::make_shared(); + for (const auto& module : modules_) { + clone->push_back(module->clone(device)); + } + return clone; + } + + /// `reset()` is empty for `ModuleList`, since it does not have parameters of + /// its own. + void reset() override {} + + /// Pretty prints the `ModuleList` module into the given `stream`. + void pretty_print(std::ostream& stream) const override { + stream << "torch::nn::ModuleList"; + } + + void push_back(std::shared_ptr module) { + modules_.push_back(std::move(module)); + const auto index = modules_.size() - 1; + register_module(c10::to_string(index), modules_[index]); + } + + /// Adds a new `Module` to the `ModuleList` container, moving or copying + /// it into a `shared_ptr` internally. This method allows passing value types, + /// and letting the container deal with the boxing. 
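+  ///
+  /// A hedged sketch (an editorial illustration; `MyModule` is a placeholder
+  /// module, not part of this header):
+  /// \rst
+  /// .. code-block:: cpp
+  ///
+  ///   struct MyModule : torch::nn::Module {
+  ///     torch::Tensor forward(torch::Tensor x) { return x * 2; }
+  ///   };
+  ///
+  ///   torch::nn::ModuleList mlist;
+  ///   mlist->push_back(MyModule()); // boxed into a shared_ptr internally
+  /// \endrst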
+ template > + void push_back(M&& module) { + using Type = typename std::remove_reference::type; + push_back(std::make_shared(std::forward(module))); + } + + /// Unwraps the contained module of a `ModuleHolder` and adds it to the + /// `ModuleList`. + template + void push_back(const ModuleHolder& module_holder) { + push_back(module_holder.ptr()); + } + + /// Iterates over the container and calls `push_back()` on each value. + template + void extend(const Container& container) { + for (const auto& module : container) { + push_back(module); + } + } + + /// Returns an iterator to the start of the `ModuleList`. + Iterator begin() { + return modules_.begin(); + } + + /// Returns a const iterator to the start of the `ModuleList`. + ConstIterator begin() const { + return modules_.begin(); + } + + /// Returns an iterator to the end of the `ModuleList`. + Iterator end() { + return modules_.end(); + } + + /// Returns a const iterator to the end of the `ModuleList`. + ConstIterator end() const { + return modules_.end(); + } + + /// Attempts to return the module at the given index as the requested type. + /// Throws an exception if the index is out of bounds or the types do not + /// match. + template + T& at(size_t index) { + static_assert( + torch::detail::is_module::value, + "Can only call ModuleList::at with an nn::Module type"); + TORCH_CHECK(index < size(), "Index out of range"); + auto module = modules_[index]->as(); + TORCH_CHECK( + module, + "Unable to cast module[", + index, + "] to ", + c10::demangle(typeid(T).name())); + return *module; + } + + /// Attempts to return the module at the given index as the requested type. + /// Throws an exception if the index is out of bounds or the types do not + /// match. + template + const T& at(size_t index) const { + static_assert( + torch::detail::is_module::value, + "Can only call ModuleList::at with an nn::Module type"); + TORCH_CHECK(index < size(), "Index out of range"); + const auto module = modules_[index]->as(); + TORCH_CHECK( + module, + "Unable to cast module[", + index, + "] to ", + c10::demangle(typeid(T).name())); + return *module; + } + + /// Attempts to return a `std::shared_ptr` whose dynamic type is that of the + /// underlying module at the given index. Throws an exception if the index is + /// out of bounds. + std::shared_ptr ptr(size_t index) const { + TORCH_CHECK(index < size(), "Index out of range"); + return modules_[index]; + } + + /// Attempts to return a `std::shared_ptr` whose type is the one provided. + /// Throws an exception if the index is out of bounds or the types do not + /// match. + template + std::shared_ptr ptr(size_t index) const { + static_assert( + torch::detail::is_module::value, + "Can only call ModuleList::ptr with an nn::Module type"); + TORCH_CHECK(index < size(), "Index out of range"); + return std::dynamic_pointer_cast(modules_[index]); + } + + /// Like `ptr(index)`. + std::shared_ptr operator[](size_t index) const { + // This is the only method we can call without a type. + return ptr(index); + } + + /// The current size of the `ModuleList` container. + size_t size() const noexcept { + return modules_.size(); + } + + /// True if there are no modules in the `ModuleList`. 
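+  /// A hedged sketch (editorial illustration):
+  /// \rst
+  /// .. code-block:: cpp
+  ///
+  ///   torch::nn::ModuleList mlist;
+  ///   assert(mlist->is_empty());
+  ///   mlist->push_back(torch::nn::Linear(3, 4));
+  ///   assert(!mlist->is_empty() && mlist->size() == 1);
+  /// \endrst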
+ bool is_empty() const noexcept { + return size() == 0; + } + + void insert(size_t index, std::shared_ptr module) { + TORCH_CHECK(index <= size(), "Index out of range"); + + if (index == size()) + push_back(std::move(module)); + else { + modules_.insert( + modules_.begin() + Iterator::difference_type(index), + std::move(module)); + + for (const auto i : c10::irange(index, size() - 1)) { + (void)i; // Suppress unused variable warning + replace_module(c10::to_string(index), modules_[index]); + } + register_module(c10::to_string(size() - 1), modules_.back()); + } + } + + /// Unwraps the contained module of a `ModuleHolder` and inserts it in the + /// `ModuleList`. + template + void insert(size_t index, const ModuleHolder& module_holder) { + insert(index, module_holder.ptr()); + } + + /// inserts a new `Module` to the `ModuleList` container, moving or copying + /// it into a `shared_ptr` internally. This method allows passing value types, + /// and letting the container deal with the boxing. + template > + void insert(size_t index, M&& module) { + using Type = typename std::remove_reference::type; + insert(index, std::make_shared(std::forward(module))); + } + + private: + template + void push_back_var(Head&& head, Tail&&... tail) { + push_back(std::forward(head)); + // Recursively calls this method, until the parameter pack only thas this + // entry left. Then calls `push_back()` a final time (above). + push_back_var(std::forward(tail)...); + } + + /// The base case, when the list of modules is empty. + void push_back_var() {} + + // Box the AnyModules to give ModuleList reference semantics, like the rest of + // the API. Note that this is not required otherwise, this could just be a + // `vector`. + std::vector> modules_; +}; + +/// A `ModuleHolder` subclass for `ModuleListImpl`. +/// See the documentation for `ModuleListImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(ModuleList); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/named_any.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/named_any.h new file mode 100644 index 0000000000000000000000000000000000000000..00d39de17f4012cbfb9aa4e56327d26c66f33bc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/named_any.h @@ -0,0 +1,94 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Stores a type erased `Module` with name. +/// +/// The `NamedAnyModule` class enables the following API for constructing +/// `nn::Sequential` with named submodules: +/// \rst +/// .. code-block:: cpp +/// +/// struct M : torch::nn::Module { +/// explicit M(int value_) : value(value_) {} +/// int value; +/// int forward() { +/// return value; +/// } +/// }; +/// +/// Sequential sequential({ +/// {"m1", std::make_shared(1)}, // shared pointer to `Module` is +/// supported {std::string("m2"), M(2)}, // `Module` is supported +/// {"linear1", Linear(10, 3)} // `ModuleHolder` is supported +/// }); +/// \endrst +class NamedAnyModule { + public: + /// Creates a `NamedAnyModule` from a (boxed) `Module`. 
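+  ///
+  /// A hedged sketch (an editorial illustration), reusing the `M` module from
+  /// the class documentation above:
+  /// \rst
+  /// .. code-block:: cpp
+  ///
+  ///   auto module_ptr = std::make_shared<M>(1);
+  ///   torch::nn::NamedAnyModule named("m1", std::move(module_ptr));
+  ///   assert(named.name() == "m1");
+  /// \endrst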
+ template + NamedAnyModule(std::string name, std::shared_ptr module_ptr) + : NamedAnyModule(std::move(name), AnyModule(std::move(module_ptr))) {} + + /// Creates a `NamedAnyModule` from a `Module`, moving or copying it + /// into a `shared_ptr` internally. + // NOTE: We need to use `std::remove_reference::type` to get rid of + // any reference components for make_unique. + template > + NamedAnyModule(std::string name, M&& module) + : NamedAnyModule( + std::move(name), + std::make_shared::type>( + std::forward(module))) {} + + /// Creates a `NamedAnyModule` from a `Module` that is unwrapped from + /// a `ModuleHolder`. + template + NamedAnyModule(std::string name, const ModuleHolder& module_holder) + : NamedAnyModule(std::move(name), module_holder.ptr()) {} + + /// Creates a `NamedAnyModule` from a type-erased `AnyModule`. + NamedAnyModule(std::string name, AnyModule any_module) + : name_(std::move(name)), module_(std::move(any_module)) {} + + /// Returns a reference to the name. + const std::string& name() const noexcept { + return name_; + } + + /// Returns a reference to the module. + AnyModule& module() noexcept { + return module_; + } + + /// Returns a const reference to the module. + const AnyModule& module() const noexcept { + return module_; + } + + private: + std::string name_; + AnyModule module_; +}; + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/conv.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/conv.h new file mode 100644 index 0000000000000000000000000000000000000000..29320d3bb3ef2031e40c5e29c9e4790d7886b6e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/conv.h @@ -0,0 +1,451 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace torch { +namespace nn { + +/// Base class for all (dimension-specialized) convolution modules. 
+template +class ConvNdImpl : public torch::nn::Cloneable { + public: + explicit ConvNdImpl(detail::ConvNdOptions options_) + : options(std::move(options_)) { + // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall) + reset(); + } + + void reset() override { + TORCH_CHECK( + options.in_channels() > 0 && options.groups() > 0 && + options.out_channels() > 0, + "in_channels, groups and out_channels must be a positive integer."); + TORCH_CHECK( + options.in_channels() % options.groups() == 0, + "in_channels must be divisible by groups"); + TORCH_CHECK( + options.out_channels() % options.groups() == 0, + "out_channels must be divisible by groups"); + + std::visit( + c10::overloaded( + [&](enumtype::kValid) { + _reversed_padding_repeated_twice.resize(2 * D); + std::fill_n(_reversed_padding_repeated_twice.begin(), 2 * D, 0); + }, + [&](enumtype::kSame) { + for (const auto i : c10::irange(D)) { + const auto stride = (*options.stride())[i]; + TORCH_CHECK( + stride == 1, + "padding='same' is not supported for strided convolutions"); + } + + _reversed_padding_repeated_twice.resize(2 * D); + for (const auto i : c10::irange(D)) { + const auto dilation = (*options.dilation())[i]; + const auto kernel_size = (*options.kernel_size())[i]; + const auto total_padding = dilation * (kernel_size - 1); + auto left_pad = total_padding / 2; + auto right_pad = total_padding - left_pad; + _reversed_padding_repeated_twice[2 * i] = left_pad; + _reversed_padding_repeated_twice[2 * i + 1] = right_pad; + } + }, + [&](const ExpandingArray& pad) { + _reversed_padding_repeated_twice = + torch::nn::modules::utils::_reverse_repeat_vector(pad, 2); + }), + options.padding()); + + if (options.transposed()) { + std::vector weight_sizes = { + options.in_channels(), options.out_channels() / options.groups()}; + weight_sizes.insert( + weight_sizes.end(), + (*options.kernel_size()).begin(), + (*options.kernel_size()).end()); + weight = this->register_parameter("weight", torch::empty(weight_sizes)); + } else { + std::vector weight_sizes = { + options.out_channels(), options.in_channels() / options.groups()}; + weight_sizes.insert( + weight_sizes.end(), + (*options.kernel_size()).begin(), + (*options.kernel_size()).end()); + weight = this->register_parameter("weight", torch::empty(weight_sizes)); + } + + if (options.bias()) { + bias = this->register_parameter( + "bias", torch::empty({options.out_channels()})); + } else { + this->register_parameter("bias", Tensor(), /*requires_grad=*/false); + } + + reset_parameters(); + } + + void reset_parameters() { + init::kaiming_uniform_( + weight, + /*a=*/std::sqrt(5)); // NOLINT(cppcoreguidelines-avoid-magic-numbers) + + if (bias.defined()) { + auto [fan_in, fan_out] = init::_calculate_fan_in_and_fan_out(weight); + auto bound = 1 / std::sqrt(fan_in); + init::uniform_(bias, -bound, bound); + } + } + + /// Pretty prints the `Conv{1,2,3}d` module into the given `stream`. 
+ void pretty_print(std::ostream& stream) const override { + stream << "torch::nn::Conv" << D << "d" + << "(" << options.in_channels() << ", " << options.out_channels() + << ", kernel_size=" << options.kernel_size() + << ", stride=" << options.stride(); + std::visit( + c10::overloaded( + [&](enumtype::kValid) { stream << ", padding='valid'"; }, + [&](enumtype::kSame) { stream << ", padding='same'"; }, + [&](const ExpandingArray& pad) { + if (*pad != *ExpandingArray(0)) { + stream << ", padding=" << pad; + } + }), + options.padding()); + if (*options.dilation() != *ExpandingArray(1)) { + stream << ", dilation=" << options.dilation(); + } + if (*options.output_padding() != *ExpandingArray(0)) { + stream << ", output_padding=" << options.output_padding(); + } + if (options.groups() != 1) { + stream << ", groups=" << options.groups(); + } + if (!options.bias()) { + stream << ", bias=" << std::boolalpha << false; + } + if (!std::get_if(&options.padding_mode())) { + stream << ", padding_mode=" + << enumtype::get_enum_name(options.padding_mode()); + } + stream << ")"; + } + + /// The options with which this `Module` was constructed. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + detail::ConvNdOptions options; + + /// The learned kernel (or "weight"). + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + Tensor weight; + + /// The learned bias. Only defined if the `bias` option was true. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + Tensor bias; + + protected: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector _reversed_padding_repeated_twice; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies convolution over a 1-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Conv1d to learn about +/// the exact behavior of this module. +/// +/// See the documentation for `torch::nn::Conv1dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false)); +/// ``` +class TORCH_API Conv1dImpl : public ConvNdImpl<1, Conv1dImpl> { + public: + Conv1dImpl( + int64_t input_channels, + int64_t output_channels, + ExpandingArray<1> kernel_size) + : Conv1dImpl( + Conv1dOptions(input_channels, output_channels, kernel_size)) {} + explicit Conv1dImpl(Conv1dOptions options_); + Tensor forward(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `Conv1dImpl`. +/// See the documentation for `Conv1dImpl` class to learn what methods it +/// provides, and examples of how to use `Conv1d` with +/// `torch::nn::Conv1dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Conv1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies convolution over a 2-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d to learn about +/// the exact behavior of this module. +/// +/// See the documentation for `torch::nn::Conv2dOptions` class to learn what +/// constructor arguments are supported for this module. 
+/// +/// Example: +/// ``` +/// Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false)); +/// ``` +class TORCH_API Conv2dImpl : public ConvNdImpl<2, Conv2dImpl> { + public: + Conv2dImpl( + int64_t input_channels, + int64_t output_channels, + ExpandingArray<2> kernel_size) + : Conv2dImpl( + Conv2dOptions(input_channels, output_channels, kernel_size)) {} + explicit Conv2dImpl(Conv2dOptions options_); + Tensor forward(const Tensor& input); + + protected: + Tensor _conv_forward(const Tensor& input, const Tensor& weight); +}; + +/// A `ModuleHolder` subclass for `Conv2dImpl`. +/// See the documentation for `Conv2dImpl` class to learn what methods it +/// provides, and examples of how to use `Conv2d` with +/// `torch::nn::Conv2dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Conv2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies convolution over a 3-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Conv3d to learn about +/// the exact behavior of this module. +/// +/// See the documentation for `torch::nn::Conv3dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false)); +/// ``` +class TORCH_API Conv3dImpl : public ConvNdImpl<3, Conv3dImpl> { + public: + Conv3dImpl( + int64_t input_channels, + int64_t output_channels, + ExpandingArray<3> kernel_size) + : Conv3dImpl( + Conv3dOptions(input_channels, output_channels, kernel_size)) {} + explicit Conv3dImpl(Conv3dOptions options_); + Tensor forward(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `Conv3dImpl`. +/// See the documentation for `Conv3dImpl` class to learn what methods it +/// provides, and examples of how to use `Conv3d` with +/// `torch::nn::Conv3dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Conv3d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Base class for all (dimension-specialized) convolution transpose modules. +template +class ConvTransposeNdImpl : public ConvNdImpl { + public: + using torch::nn::ConvNdImpl::ConvNdImpl; + explicit ConvTransposeNdImpl(detail::ConvNdOptions options_) + : ConvNdImpl(options_) { + TORCH_INTERNAL_ASSERT( + std::holds_alternative>(this->options.padding()), + "ConvTranspose padding cannot be a string"); + } + + /// Pretty prints the `ConvTranspose{1,2,3}d` module into the given `stream`. 
+ void pretty_print(std::ostream& stream) const override { + stream << "torch::nn::ConvTranspose" << D << "d" + << "(" << this->options.in_channels() << ", " + << this->options.out_channels() + << ", kernel_size=" << this->options.kernel_size() + << ", stride=" << this->options.stride(); + const auto& pad = padding(); + if (*pad != *ExpandingArray(0)) { + stream << ", padding=" << pad; + } + if (*this->options.dilation() != *ExpandingArray(1)) { + stream << ", dilation=" << this->options.dilation(); + } + if (*this->options.output_padding() != *ExpandingArray(0)) { + stream << ", output_padding=" << this->options.output_padding(); + } + if (this->options.groups() != 1) { + stream << ", groups=" << this->options.groups(); + } + if (!this->options.bias()) { + stream << ", bias=" << std::boolalpha << false; + } + if (!std::get_if(&this->options.padding_mode())) { + stream << ", padding_mode=" + << enumtype::get_enum_name(this->options.padding_mode()); + } + stream << ")"; + } + + protected: + const ExpandingArray& padding() const { + return std::get>(this->options.padding()); + } + + std::vector _output_padding( + const Tensor& input, + const c10::optional& output_size, + const ExpandingArray& stride, + const ExpandingArray& padding, + const ExpandingArray& kernel_size); +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose1d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the ConvTranspose1d function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.ConvTranspose1d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::ConvTranspose1dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// ConvTranspose1d model(ConvTranspose1dOptions(3, 2, +/// 3).stride(1).bias(false)); +/// ``` +class TORCH_API ConvTranspose1dImpl + : public ConvTransposeNdImpl<1, ConvTranspose1dImpl> { + public: + ConvTranspose1dImpl( + int64_t input_channels, + int64_t output_channels, + ExpandingArray<1> kernel_size) + : ConvTranspose1dImpl(ConvTranspose1dOptions( + input_channels, + output_channels, + kernel_size)) {} + explicit ConvTranspose1dImpl(ConvTranspose1dOptions options_); + Tensor forward( + const Tensor& input, + const c10::optional& output_size = c10::nullopt); + + protected: + FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(c10::optional())}) +}; + +/// A `ModuleHolder` subclass for `ConvTranspose1dImpl`. +/// See the documentation for `ConvTranspose1dImpl` class to learn what methods +/// it provides, and examples of how to use `ConvTranspose1d` with +/// `torch::nn::ConvTranspose1dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(ConvTranspose1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose2d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the ConvTranspose2d function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.ConvTranspose2d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::ConvTranspose2dOptions` class to learn +/// what constructor arguments are supported for this module. 
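+///
+/// The optional `output_size` argument of `forward` disambiguates the output
+/// shape of a strided transposed convolution (a hedged editorial sketch):
+/// ```
+/// ConvTranspose2d model(ConvTranspose2dOptions(16, 8, 3).stride(2));
+/// auto y = model(torch::randn({1, 16, 10, 10}),
+///                std::vector<int64_t>{21, 21}); // y: [1, 8, 21, 21]
+/// ```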
+/// +/// Example: +/// ``` +/// ConvTranspose2d model(ConvTranspose2dOptions(3, 2, +/// 3).stride(1).bias(false)); +/// ``` +class TORCH_API ConvTranspose2dImpl + : public ConvTransposeNdImpl<2, ConvTranspose2dImpl> { + public: + ConvTranspose2dImpl( + int64_t input_channels, + int64_t output_channels, + ExpandingArray<2> kernel_size) + : ConvTranspose2dImpl(ConvTranspose2dOptions( + input_channels, + output_channels, + kernel_size)) {} + explicit ConvTranspose2dImpl(ConvTranspose2dOptions options_); + Tensor forward( + const Tensor& input, + const c10::optional& output_size = c10::nullopt); + + protected: + FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(c10::optional())}) +}; + +/// A `ModuleHolder` subclass for `ConvTranspose2dImpl`. +/// See the documentation for `ConvTranspose2dImpl` class to learn what methods +/// it provides, and examples of how to use `ConvTranspose2d` with +/// `torch::nn::ConvTranspose2dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(ConvTranspose2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose3d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the ConvTranspose3d function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.ConvTranspose3d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::ConvTranspose3dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// ConvTranspose3d model(ConvTranspose3dOptions(2, 2, +/// 2).stride(1).bias(false)); +/// ``` +class TORCH_API ConvTranspose3dImpl + : public ConvTransposeNdImpl<3, ConvTranspose3dImpl> { + public: + ConvTranspose3dImpl( + int64_t input_channels, + int64_t output_channels, + ExpandingArray<3> kernel_size) + : ConvTranspose3dImpl(ConvTranspose3dOptions( + input_channels, + output_channels, + kernel_size)) {} + explicit ConvTranspose3dImpl(ConvTranspose3dOptions options_); + Tensor forward( + const Tensor& input, + const c10::optional& output_size = c10::nullopt); + + protected: + FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(c10::optional())}) +}; + +/// A `ModuleHolder` subclass for `ConvTranspose3dImpl`. +/// See the documentation for `ConvTranspose3dImpl` class to learn what methods +/// it provides, and examples of how to use `ConvTranspose3d` with +/// `torch::nn::ConvTranspose3dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(ConvTranspose3d); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/distance.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/distance.h new file mode 100644 index 0000000000000000000000000000000000000000..93a872476436919c2dd2fd0ea9aad1a88bcd5589 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/distance.h @@ -0,0 +1,86 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace nn { + +/// Returns the cosine similarity between :math:`x_1` and :math:`x_2`, computed +/// along `dim`. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.CosineSimilarity to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::CosineSimilarityOptions` class to +/// learn what constructor arguments are supported for this module. 
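+///
+/// A hedged forward sketch (editorial illustration):
+/// ```
+/// CosineSimilarity cos(CosineSimilarityOptions().dim(1));
+/// auto sim = cos(torch::randn({4, 128}), torch::randn({4, 128}));
+/// // sim has shape [4], with values in [-1, 1]
+/// ```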
+/// +/// Example: +/// ``` +/// CosineSimilarity model(CosineSimilarityOptions().dim(0).eps(0.5)); +/// ``` +class TORCH_API CosineSimilarityImpl : public Cloneable { + public: + explicit CosineSimilarityImpl(const CosineSimilarityOptions& options_ = {}); + + void reset() override; + + /// Pretty prints the `CosineSimilarity` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input1, const Tensor& input2); + + /// The options with which this `Module` was constructed. + CosineSimilarityOptions options; +}; + +/// A `ModuleHolder` subclass for `CosineSimilarityImpl`. +/// See the documentation for `CosineSimilarityImpl` class to learn what methods +/// it provides, and examples of how to use `CosineSimilarity` with +/// `torch::nn::CosineSimilarityOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(CosineSimilarity); + +// ============================================================================ + +/// Returns the batchwise pairwise distance between vectors :math:`v_1`, +/// :math:`v_2` using the p-norm. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.PairwiseDistance to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::PairwiseDistanceOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// PairwiseDistance +/// model(PairwiseDistanceOptions().p(3).eps(0.5).keepdim(true)); +/// ``` +class TORCH_API PairwiseDistanceImpl : public Cloneable { + public: + explicit PairwiseDistanceImpl(const PairwiseDistanceOptions& options_ = {}); + + void reset() override; + + /// Pretty prints the `PairwiseDistance` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input1, const Tensor& input2); + + /// The options with which this `Module` was constructed. + PairwiseDistanceOptions options; +}; + +/// A `ModuleHolder` subclass for `PairwiseDistanceImpl`. +/// See the documentation for `PairwiseDistanceImpl` class to learn what methods +/// it provides, and examples of how to use `PairwiseDistance` with +/// `torch::nn::PairwiseDistanceOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(PairwiseDistance); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/dropout.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/dropout.h new file mode 100644 index 0000000000000000000000000000000000000000..7cc7dfb80fbd27541301e6fb0c81930c549d91ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/dropout.h @@ -0,0 +1,190 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include + +namespace torch { +namespace nn { + +namespace detail { + +template +class _DropoutNd : public torch::nn::Cloneable { + public: + _DropoutNd(double p) : _DropoutNd(DropoutOptions().p(p)){}; + + explicit _DropoutNd(const DropoutOptions& options_ = {}) : options(options_) { + // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall) + reset(); + } + + void reset() override { + TORCH_CHECK( + options.p() >= 0. 
&& options.p() <= 1., + "dropout probability has to be between 0 and 1, but got ", + options.p()); + } + + /// The options with which this `Module` was constructed. + DropoutOptions options; +}; + +} // namespace detail + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies dropout over a 1-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Dropout to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::DropoutOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Dropout model(DropoutOptions().p(0.42).inplace(true)); +/// ``` +class TORCH_API DropoutImpl : public detail::_DropoutNd { + public: + using detail::_DropoutNd::_DropoutNd; + + Tensor forward(Tensor input); + + /// Pretty prints the `Dropout` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `DropoutImpl`. +/// See the documentation for `DropoutImpl` class to learn what methods it +/// provides, and examples of how to use `Dropout` with +/// `torch::nn::DropoutOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Dropout); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies dropout over a 2-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Dropout2d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::Dropout2dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Dropout2d model(Dropout2dOptions().p(0.42).inplace(true)); +/// ``` +class TORCH_API Dropout2dImpl : public detail::_DropoutNd { + public: + using detail::_DropoutNd::_DropoutNd; + + Tensor forward(Tensor input); + + /// Pretty prints the `Dropout2d` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `Dropout2dImpl`. +/// See the documentation for `Dropout2dImpl` class to learn what methods it +/// provides, and examples of how to use `Dropout2d` with +/// `torch::nn::Dropout2dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Dropout2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies dropout over a 3-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Dropout3d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::Dropout3dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Dropout3d model(Dropout3dOptions().p(0.42).inplace(true)); +/// ``` +class TORCH_API Dropout3dImpl : public detail::_DropoutNd { + public: + using detail::_DropoutNd::_DropoutNd; + + Tensor forward(Tensor input); + + /// Pretty prints the `Dropout3d` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `Dropout3dImpl`. +/// See the documentation for `Dropout3dImpl` class to learn what methods it +/// provides, and examples of how to use `Dropout3d` with +/// `torch::nn::Dropout3dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. 
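+///
+/// A hedged train/eval sketch (editorial illustration): dropout is only
+/// active in training mode.
+/// ```
+/// Dropout3d model(Dropout3dOptions().p(0.25));
+/// auto x = torch::randn({2, 4, 8, 8, 8});
+/// model->train();
+/// auto y = model(x); // elements zeroed with probability 0.25
+/// model->eval();
+/// auto z = model(x); // identity
+/// ```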
+TORCH_MODULE(Dropout3d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AlphaDropout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies Alpha Dropout over the input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AlphaDropout to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::AlphaDropoutOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true)); +/// ``` +class TORCH_API AlphaDropoutImpl : public detail::_DropoutNd { + public: + using detail::_DropoutNd::_DropoutNd; + + Tensor forward(const Tensor& input); + + /// Pretty prints the `AlphaDropout` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `AlphaDropoutImpl`. +/// See the documentation for `AlphaDropoutImpl` class to learn what methods it +/// provides, and examples of how to use `AlphaDropout` with +/// `torch::nn::AlphaDropoutOptions`. See the documentation for `ModuleHolder` +/// to learn about PyTorch's module storage semantics. +TORCH_MODULE(AlphaDropout); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FeatureAlphaDropout +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// See the documentation for `torch::nn::FeatureAlphaDropoutOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true)); +/// ``` +class TORCH_API FeatureAlphaDropoutImpl + : public detail::_DropoutNd { + public: + using detail::_DropoutNd::_DropoutNd; + + Tensor forward(const Tensor& input); + + /// Pretty prints the `FeatureAlphaDropout` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; +}; + +/// A `ModuleHolder` subclass for `FeatureAlphaDropoutImpl`. +/// See the documentation for `FeatureAlphaDropoutImpl` class to learn what +/// methods it provides, and examples of how to use `FeatureAlphaDropout` with +/// `torch::nn::FeatureAlphaDropoutOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(FeatureAlphaDropout); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/embedding.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/embedding.h new file mode 100644 index 0000000000000000000000000000000000000000..fcaddd46e83b7fdac612eb72da14ef4d0d157948 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/embedding.h @@ -0,0 +1,171 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace nn { + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Embedding +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Performs a lookup in a fixed size embedding table. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Embedding to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::EmbeddingOptions` class to learn what +/// constructor arguments are supported for this module. 
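+///
+/// A hedged lookup sketch (editorial illustration):
+/// ```
+/// Embedding embedding(EmbeddingOptions(10, 3));
+/// auto indices = torch::tensor({1, 2, 4}, torch::kLong);
+/// auto vectors = embedding(indices); // shape [3, 3]
+/// ```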
+/// +/// Example: +/// ``` +/// Embedding model(EmbeddingOptions(10, +/// 2).padding_idx(3).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true)); +/// ``` +class TORCH_API EmbeddingImpl : public torch::nn::Cloneable { + public: + EmbeddingImpl(int64_t num_embeddings, int64_t embedding_dim) + : EmbeddingImpl(EmbeddingOptions(num_embeddings, embedding_dim)) {} + explicit EmbeddingImpl(EmbeddingOptions options_); + + void reset() override; + + void reset_parameters(); + + /// Pretty prints the `Embedding` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// Performs a lookup on the embedding table stored in `weight` using the + /// `indices` supplied and returns the result. + Tensor forward(const Tensor& indices); + + /// The `Options` used to configure this `Embedding` module. + /// Changes to `EmbeddingOptions` *after construction* have no effect. + EmbeddingOptions options; + + /// The embedding table. + Tensor weight; +}; + +/// A `ModuleHolder` subclass for `EmbeddingImpl`. +/// See the documentation for `EmbeddingImpl` class to learn what methods it +/// provides, and examples of how to use `Embedding` with +/// `torch::nn::EmbeddingOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +class Embedding : public torch::nn::ModuleHolder { + public: + using torch::nn::ModuleHolder::ModuleHolder; + + /// See the documentation for `torch::nn::EmbeddingFromPretrainedOptions` + /// class to learn what optional arguments are supported for this function. + static Embedding from_pretrained( + const torch::Tensor& embeddings, + const EmbeddingFromPretrainedOptions& options = {}) { + TORCH_CHECK( + embeddings.dim() == 2, + "Embeddings parameter is expected to be 2-dimensional"); + + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int64_t rows, cols; + rows = embeddings.size(0); + cols = embeddings.size(1); + + Embedding embedding(EmbeddingOptions(rows, cols) + ._weight(embeddings) + .padding_idx(options.padding_idx()) + .max_norm(options.max_norm()) + .norm_type(options.norm_type()) + .scale_grad_by_freq(options.scale_grad_by_freq()) + .sparse(options.sparse())); + embedding->weight.set_requires_grad(!options.freeze()); + return embedding; + } +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ EmbeddingBag +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Computes sums or means of 'bags' of embeddings, without instantiating the +/// intermediate embeddings. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.EmbeddingBag to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::EmbeddingBagOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// EmbeddingBag model(EmbeddingBagOptions(10, +/// 2).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true).mode(torch::kSum).padding_idx(1)); +/// ``` +class TORCH_API EmbeddingBagImpl + : public torch::nn::Cloneable { + public: + EmbeddingBagImpl(int64_t num_embeddings, int64_t embedding_dim) + : EmbeddingBagImpl(EmbeddingBagOptions(num_embeddings, embedding_dim)) {} + explicit EmbeddingBagImpl(EmbeddingBagOptions options_); + + void reset() override; + + void reset_parameters(); + + /// Pretty prints the `EmbeddingBag` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The `Options` used to configure this `EmbeddingBag` module. 
+ EmbeddingBagOptions options; + /// The embedding table. + Tensor weight; + + Tensor forward( + const Tensor& input, + const Tensor& offsets = {}, + const Tensor& per_sample_weights = {}); + + protected: + FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())}, {2, AnyValue(Tensor())}) +}; + +/// A `ModuleHolder` subclass for `EmbeddingBagImpl`. +/// See the documentation for `EmbeddingBagImpl` class to learn what methods it +/// provides, and examples of how to use `EmbeddingBag` with +/// `torch::nn::EmbeddingBagOptions`. See the documentation for `ModuleHolder` +/// to learn about PyTorch's module storage semantics. +class EmbeddingBag : public torch::nn::ModuleHolder { + public: + using torch::nn::ModuleHolder::ModuleHolder; + + /// See the documentation for `torch::nn::EmbeddingBagFromPretrainedOptions` + /// class to learn what optional arguments are supported for this function. + static EmbeddingBag from_pretrained( + const torch::Tensor& embeddings, + const EmbeddingBagFromPretrainedOptions& options = {}) { + TORCH_CHECK( + embeddings.dim() == 2, + "Embeddings parameter is expected to be 2-dimensional"); + + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int64_t rows, cols; + rows = embeddings.size(0); + cols = embeddings.size(1); + + EmbeddingBag embeddingbag( + EmbeddingBagOptions(rows, cols) + ._weight(embeddings) + .max_norm(options.max_norm()) + .norm_type(options.norm_type()) + .scale_grad_by_freq(options.scale_grad_by_freq()) + .mode(options.mode()) + .sparse(options.sparse()) + .padding_idx(options.padding_idx())); + embeddingbag->weight.set_requires_grad(!options.freeze()); + return embeddingbag; + } +}; +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/fold.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/fold.h new file mode 100644 index 0000000000000000000000000000000000000000..da16381058a858aa16a9e7941740e58748f15e4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/fold.h @@ -0,0 +1,87 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Applies fold over a 3-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Fold to learn about +/// the exact behavior of this module. +/// +/// See the documentation for `torch::nn::FoldOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Fold model(FoldOptions({8, 8}, {3, 3}).dilation(2).padding({2, +/// 1}).stride(2)); +/// ``` +class TORCH_API FoldImpl : public torch::nn::Cloneable { + public: + FoldImpl(ExpandingArray<2> output_size, ExpandingArray<2> kernel_size) + : FoldImpl(FoldOptions(output_size, kernel_size)) {} + explicit FoldImpl(const FoldOptions& options_); + + void reset() override; + + /// Pretty prints the `Fold` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input); + + /// The options with which this `Module` was constructed. + FoldOptions options; +}; + +/// A `ModuleHolder` subclass for `FoldImpl`. +/// See the documentation for `FoldImpl` class to learn what methods it +/// provides, and examples of how to use `Fold` with `torch::nn::FoldOptions`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. 
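+///
+/// A shape-level usage sketch (illustrative; the sizes below are arbitrary
+/// assumptions chosen so the block count works out):
+/// ```
+/// // 2x2 blocks over a 4x5 output with stride 1 gives L = 3 * 4 = 12 blocks,
+/// // so the input must have shape (N, C * 2 * 2, 12).
+/// Fold fold(FoldOptions({4, 5}, {2, 2}));
+/// auto blocks = torch::randn({1, 1 * 2 * 2, 12});
+/// auto output = fold(blocks); // shape: [1, 1, 4, 5]
+/// ```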
+TORCH_MODULE(Fold); + +// ============================================================================ + +/// Applies unfold over a 4-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.Unfold to learn about +/// the exact behavior of this module. +/// +/// See the documentation for `torch::nn::UnfoldOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Unfold model(UnfoldOptions({2, 4}).dilation(2).padding({2, 1}).stride(2)); +/// ``` +class TORCH_API UnfoldImpl : public Cloneable { + public: + UnfoldImpl(ExpandingArray<2> kernel_size) + : UnfoldImpl(UnfoldOptions(kernel_size)) {} + explicit UnfoldImpl(const UnfoldOptions& options_); + + void reset() override; + + /// Pretty prints the `Unfold` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input); + + /// The options with which this `Module` was constructed. + UnfoldOptions options; +}; + +/// A `ModuleHolder` subclass for `UnfoldImpl`. +/// See the documentation for `UnfoldImpl` class to learn what methods it +/// provides, and examples of how to use `Unfold` with +/// `torch::nn::UnfoldOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(Unfold); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/instancenorm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/instancenorm.h new file mode 100644 index 0000000000000000000000000000000000000000..b29ad007de7355846850aa8a17a2a554b8294be6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/instancenorm.h @@ -0,0 +1,153 @@ +#pragma once + +#include +#include + +namespace torch { +namespace nn { + +/// Base class for all (dimension-specialized) instance norm modules +template +class InstanceNormImpl + : public torch::nn::NormImplBase { + private: + inline Tensor apply_instance_norm(const Tensor& input) { + return torch::nn::functional::detail::instance_norm( + input, + this->running_mean, + this->running_var, + this->weight, + this->bias, + this->is_training() || !this->options.track_running_stats(), + this->options.momentum(), + this->options.eps()); + } + + inline Tensor handle_no_batch_input(const Tensor& input) { + return this->apply_instance_norm(input.unsqueeze(0)).squeeze(0); + } + + public: + using torch::nn::NormImplBase::NormImplBase; + + Tensor forward(const Tensor& input) { + this->_check_input_dim(input); + + // For InstanceNorm1D, 2D is unbatched and 3D is batched + // For InstanceNorm2D, 3D is unbatched and 4D is batched + // For InstanceNorm3D, 4D is unbatched and 5D is batched + // check if input does not have a batch-dim + if (input.dim() == D + 1) { + return this->handle_no_batch_input(input); + } + + return this->apply_instance_norm(input); + } + + /// Pretty prints the `InstanceNorm{1,2,3}d` module into the given `stream`. 
+ void pretty_print(std::ostream& stream) const override { + stream << std::boolalpha << "torch::nn::InstanceNorm" << D << "d(" + << this->options.num_features() << ", " + << "eps=" << this->options.eps() << ", " + << "momentum=" << this->options.momentum() << ", " + << "affine=" << this->options.affine() << ", " + << "track_running_stats=" << this->options.track_running_stats() + << ")"; + } +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm1d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the InstanceNorm1d function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.InstanceNorm1d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::InstanceNorm1dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// InstanceNorm1d +/// model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +class TORCH_API InstanceNorm1dImpl + : public InstanceNormImpl<1, InstanceNorm1dImpl> { + protected: + void _check_input_dim(const Tensor& input) override; + + public: + using InstanceNormImpl<1, InstanceNorm1dImpl>::InstanceNormImpl; +}; + +/// A `ModuleHolder` subclass for `InstanceNorm1dImpl`. +/// See the documentation for `InstanceNorm1dImpl` class to learn what methods +/// it provides, and examples of how to use `InstanceNorm1d` with +/// `torch::nn::InstanceNorm1dOptions`. See the documentation for `ModuleHolder` +/// to learn about PyTorch's module storage semantics. +TORCH_MODULE(InstanceNorm1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm2d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the InstanceNorm2d function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.InstanceNorm2d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::InstanceNorm2dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// InstanceNorm2d +/// model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +class TORCH_API InstanceNorm2dImpl + : public InstanceNormImpl<2, InstanceNorm2dImpl> { + protected: + void _check_input_dim(const Tensor& input) override; + + public: + using InstanceNormImpl<2, InstanceNorm2dImpl>::InstanceNormImpl; +}; + +/// A `ModuleHolder` subclass for `InstanceNorm2dImpl`. +/// See the documentation for `InstanceNorm2dImpl` class to learn what methods +/// it provides, and examples of how to use `InstanceNorm2d` with +/// `torch::nn::InstanceNorm2dOptions`. See the documentation for `ModuleHolder` +/// to learn about PyTorch's module storage semantics. +TORCH_MODULE(InstanceNorm2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm3d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies the InstanceNorm3d function. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.InstanceNorm3d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::InstanceNorm3dOptions` class to learn +/// what constructor arguments are supported for this module. 
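+///
+/// A usage sketch (illustrative; shapes are arbitrary assumptions). Note
+/// that, per `InstanceNormImpl::forward` above, a 4-D input is treated as
+/// unbatched:
+/// ```
+/// InstanceNorm3d norm(InstanceNorm3dOptions(4));
+/// auto batched = norm(torch::randn({2, 4, 8, 8, 8})); // (N, C, D, H, W)
+/// auto unbatched = norm(torch::randn({4, 8, 8, 8}));  // (C, D, H, W)
+/// ```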
+/// +/// Example: +/// ``` +/// InstanceNorm3d +/// model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +class TORCH_API InstanceNorm3dImpl + : public InstanceNormImpl<3, InstanceNorm3dImpl> { + protected: + void _check_input_dim(const Tensor& input) override; + + public: + using InstanceNormImpl<3, InstanceNorm3dImpl>::InstanceNormImpl; +}; + +/// A `ModuleHolder` subclass for `InstanceNorm3dImpl`. +/// See the documentation for `InstanceNorm3dImpl` class to learn what methods +/// it provides, and examples of how to use `InstanceNorm3d` with +/// `torch::nn::InstanceNorm3dOptions`. See the documentation for `ModuleHolder` +/// to learn about PyTorch's module storage semantics. +TORCH_MODULE(InstanceNorm3d); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/linear.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/linear.h new file mode 100644 index 0000000000000000000000000000000000000000..a58fdb36b43df9158bfd4513a2ace6bbf5e8d5f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/linear.h @@ -0,0 +1,214 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Identity ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// A placeholder identity operator that is argument-insensitive. +/// See https://pytorch.org/docs/master/generated/torch.nn.Identity.html to +/// learn about the exact behavior of this module. +class TORCH_API IdentityImpl : public Cloneable { + public: + void reset() override; + + /// Pretty prints the `Identity` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `IdentityImpl`. +/// See the documentation for `IdentityImpl` class to learn what methods it +/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(Identity); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Linear ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies a linear transformation with optional bias. +/// See https://pytorch.org/docs/master/generated/torch.nn.Linear.html to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::LinearOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// Linear model(LinearOptions(5, 2).bias(false)); +/// ``` +class TORCH_API LinearImpl : public Cloneable { + public: + LinearImpl(int64_t in_features, int64_t out_features) + : LinearImpl(LinearOptions(in_features, out_features)) {} + explicit LinearImpl(const LinearOptions& options_); + + void reset() override; + + void reset_parameters(); + + /// Pretty prints the `Linear` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// Transforms the `input` tensor by multiplying with the `weight` and + /// optionally adding the `bias`, if `with_bias` is true in the options. + Tensor forward(const Tensor& input); + + /// The options used to configure this module. + LinearOptions options; + + /// The learned weight. + Tensor weight; + + /// The learned bias. If `bias` is false in the `options`, this tensor is + /// undefined. 
+  Tensor bias;
+};
+
+/// A `ModuleHolder` subclass for `LinearImpl`.
+/// See the documentation for `LinearImpl` class to learn what methods it
+/// provides, and examples of how to use `Linear` with
+/// `torch::nn::LinearOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(Linear);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Flatten ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// A placeholder for the Flatten operator.
+/// See https://pytorch.org/docs/master/generated/torch.nn.Flatten.html to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::FlattenOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// Flatten model(FlattenOptions().start_dim(2).end_dim(4));
+/// ```
+class TORCH_API FlattenImpl : public Cloneable<FlattenImpl> {
+ public:
+  explicit FlattenImpl(const FlattenOptions& options_ = {});
+
+  void reset() override;
+
+  /// Pretty prints the `Flatten` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  /// Applies a flatten transform on the `input`.
+  Tensor forward(const Tensor& input);
+
+  /// The options used to configure this module.
+  FlattenOptions options;
+};
+
+/// A `ModuleHolder` subclass for `FlattenImpl`.
+/// See the documentation for `FlattenImpl` class to learn what methods it
+/// provides, and examples of how to use `Flatten` with
+/// `torch::nn::FlattenOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(Flatten);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Unflatten
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// A placeholder for the Unflatten operator.
+/// See https://pytorch.org/docs/master/generated/torch.nn.Unflatten.html to
+/// learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::UnflattenOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// Unflatten model(UnflattenOptions(0, {2, 2}));
+/// Unflatten model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}}));
+/// ```
+class TORCH_API UnflattenImpl : public Cloneable<UnflattenImpl> {
+ public:
+  UnflattenImpl(int64_t dim, std::vector<int64_t> sizes)
+      : UnflattenImpl(UnflattenOptions(dim, sizes)) {}
+  UnflattenImpl(std::string dimname, UnflattenOptions::namedshape_t namedshape)
+      : UnflattenImpl(UnflattenOptions(dimname, namedshape)) {}
+  explicit UnflattenImpl(UnflattenOptions options_);
+
+  void reset() override;
+
+  /// Pretty prints the `Unflatten` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  /// Applies an unflatten transform on the `input`.
+  Tensor forward(const Tensor& input);
+
+  /// The options used to configure this module.
+  UnflattenOptions options;
+};
+
+/// A `ModuleHolder` subclass for `UnflattenImpl`.
+/// See the documentation for `UnflattenImpl` class to learn what methods it
+/// provides, and examples of how to use `Unflatten` with
+/// `torch::nn::UnflattenOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(Unflatten);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Bilinear ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies a bilinear transformation with optional bias.
+/// See https://pytorch.org/docs/master/generated/torch.nn.Bilinear.html to
+/// learn about the exact behavior of this module.
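+///
+/// A shape-level usage sketch (illustrative; the feature sizes and batch size
+/// are arbitrary assumptions):
+/// ```
+/// Bilinear model(BilinearOptions(3, 2, 4));
+/// auto output = model(torch::randn({128, 3}), torch::randn({128, 2}));
+/// // output shape: [128, 4]
+/// ```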
+///
+/// See the documentation for `torch::nn::BilinearOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// Bilinear model(BilinearOptions(3, 2, 4).bias(false));
+/// ```
+class TORCH_API BilinearImpl : public Cloneable<BilinearImpl> {
+ public:
+  BilinearImpl(int64_t in1_features, int64_t in2_features, int64_t out_features)
+      : BilinearImpl(
+            BilinearOptions(in1_features, in2_features, out_features)) {}
+  explicit BilinearImpl(const BilinearOptions& options_);
+
+  void reset() override;
+
+  void reset_parameters();
+
+  /// Pretty prints the `Bilinear` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  /// Applies a bilinear transform on the `input1` and `input2` tensor by
+  /// multiplying with the `weight` and optionally adding the `bias`, if
+  /// `bias` is set to true in the options.
+  Tensor forward(const Tensor& input1, const Tensor& input2);
+
+  /// The options used to configure this module.
+  BilinearOptions options;
+
+  /// The learned weight.
+  Tensor weight;
+
+  /// The learned bias. If `bias` is set to false in the `options`, this
+  /// tensor is undefined.
+  Tensor bias;
+};
+
+/// A `ModuleHolder` subclass for `BilinearImpl`.
+/// See the documentation for `BilinearImpl` class to learn what methods it
+/// provides, and examples of how to use `Bilinear` with
+/// `torch::nn::BilinearOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(Bilinear);
+
+} // namespace nn
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/loss.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/loss.h
new file mode 100644
index 0000000000000000000000000000000000000000..f34cfbf593340d8aa17fb701557f7bc8080f41d3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/loss.h
@@ -0,0 +1,805 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+
+namespace torch {
+namespace nn {
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ L1Loss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Creates a criterion that measures the mean absolute error (MAE) between each
+/// element in the input :math:`x` and target :math:`y`.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.L1Loss to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::L1LossOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// L1Loss model(L1LossOptions(torch::kNone));
+/// ```
+struct TORCH_API L1LossImpl : Cloneable<L1LossImpl> {
+  explicit L1LossImpl(L1LossOptions options_ = {});
+
+  void reset() override;
+
+  /// Pretty prints the `L1Loss` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  Tensor forward(const Tensor& input, const Tensor& target);
+
+  /// The options with which this `Module` was constructed.
+  L1LossOptions options;
+};
+
+/// A `ModuleHolder` subclass for `L1LossImpl`.
+/// See the documentation for `L1LossImpl` class to learn what methods it
+/// provides, and examples of how to use `L1Loss` with
+/// `torch::nn::L1LossOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
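+///
+/// A minimal training-style usage sketch (illustrative; shapes are arbitrary
+/// assumptions):
+/// ```
+/// L1Loss criterion;
+/// auto input = torch::randn({3, 5}, torch::requires_grad());
+/// auto target = torch::randn({3, 5});
+/// auto loss = criterion(input, target);
+/// loss.backward();
+/// ```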
+TORCH_MODULE(L1Loss); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ KLDivLoss +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// The Kullback-Leibler divergence loss measure +/// See https://pytorch.org/docs/master/nn.html#torch.nn.KLDivLoss to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::KLDivLossOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// KLDivLoss model(KLDivLossOptions().reduction(torch::kNone)); +/// ``` +struct TORCH_API KLDivLossImpl : Cloneable { + explicit KLDivLossImpl(KLDivLossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `KLDivLoss` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input, const Tensor& target); + + /// The options with which this `Module` was constructed. + KLDivLossOptions options; +}; + +/// A `ModuleHolder` subclass for `KLDivLossImpl`. +/// See the documentation for `KLDivLossImpl` class to learn what methods it +/// provides, and examples of how to use `KLDivLoss` with +/// `torch::nn::KLDivLossOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(KLDivLoss); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MSELoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Creates a criterion that measures the mean squared error (squared L2 norm) +/// between each element in the input :math:`x` and target :math:`y`. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.MSELoss to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MSELossOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MSELoss model(MSELossOptions(torch::kNone)); +/// ``` +struct TORCH_API MSELossImpl : Cloneable { + explicit MSELossImpl(MSELossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `MSELoss` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input, const Tensor& target); + + /// The options with which this `Module` was constructed. + MSELossOptions options; +}; + +/// A `ModuleHolder` subclass for `MSELossImpl`. +/// See the documentation for `MSELossImpl` class to learn what methods it +/// provides, and examples of how to use `MSELoss` with +/// `torch::nn::MSELossOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(MSELoss); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BCELoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Creates a criterion that measures the Binary Cross Entropy +/// between the target and the output. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.BCELoss to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::BCELossOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API BCELossImpl : Cloneable { + explicit BCELossImpl(BCELossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `BCELoss` module into the given `stream`. 
+ void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input, const Tensor& target); + + /// The options with which this `Module` was constructed. + BCELossOptions options; +}; + +/// A `ModuleHolder` subclass for `BCELossImpl`. +/// See the documentation for `BCELossImpl` class to learn what methods it +/// provides, and examples of how to use `BCELoss` with +/// `torch::nn::BCELossOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(BCELoss); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HingeEmbeddingLoss +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Creates a criterion that measures the loss given an input tensor :math:`x` +/// and a labels tensor :math:`y` (containing 1 or -1). +/// See https://pytorch.org/docs/master/nn.html#torch.nn.HingeEmbeddingLoss to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// HingeEmbeddingLoss +/// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone)); +/// ``` +struct TORCH_API HingeEmbeddingLossImpl : Cloneable { + explicit HingeEmbeddingLossImpl(HingeEmbeddingLossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `HingeEmbeddingLoss` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input, const Tensor& target); + + /// The options with which this `Module` was constructed. + HingeEmbeddingLossOptions options; +}; + +/// A `ModuleHolder` subclass for `HingeEmbeddingLossImpl`. +/// See the documentation for `HingeEmbeddingLossImpl` class to learn what +/// methods it provides, and examples of how to use `HingeEmbeddingLoss` with +/// `torch::nn::HingeEmbeddingLossOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(HingeEmbeddingLoss); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiMarginLoss +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Creates a criterion that optimizes a multi-class classification hinge +/// loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) +/// and output :math:`y` (which is a 1D tensor of target class indices, :math:`0 +/// \leq y \leq \text{x.size}(1)-1`). See +/// https://pytorch.org/docs/master/nn.html#torch.nn.MultiMarginLoss to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight)); +/// ``` +struct TORCH_API MultiMarginLossImpl : public Cloneable { + explicit MultiMarginLossImpl(MultiMarginLossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `MultiMarginLoss` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input, const Tensor& target); + + /// The options with which this `Module` was constructed. + MultiMarginLossOptions options; +}; + +/// A `ModuleHolder` subclass for `MultiMarginLossImpl`. +/// See the documentation for `MultiMarginLossImpl` class to learn what methods +/// it provides, and examples of how to use `MultiMarginLoss` with +/// `torch::nn::MultiMarginLossOptions`. 
See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(MultiMarginLoss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CosineEmbeddingLoss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Creates a criterion that measures the loss given input tensors
+/// `input1`, `input2`, and a `Tensor` label `target` with values 1 or
+/// -1. This is used for measuring whether two inputs are similar or
+/// dissimilar, using the cosine distance, and is typically used for learning
+/// nonlinear embeddings or semi-supervised learning.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.CosineEmbeddingLoss to
+/// learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to
+/// learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5));
+/// ```
+struct TORCH_API CosineEmbeddingLossImpl
+    : public Cloneable<CosineEmbeddingLossImpl> {
+  explicit CosineEmbeddingLossImpl(CosineEmbeddingLossOptions options_ = {});
+
+  void reset() override;
+
+  /// Pretty prints the `CosineEmbeddingLoss` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  Tensor forward(
+      const Tensor& input1,
+      const Tensor& input2,
+      const Tensor& target);
+
+  /// The options with which this `Module` was constructed.
+  CosineEmbeddingLossOptions options;
+};
+
+/// A `ModuleHolder` subclass for `CosineEmbeddingLossImpl`.
+/// See the documentation for `CosineEmbeddingLossImpl` class to learn what
+/// methods it provides, and examples of how to use `CosineEmbeddingLoss` with
+/// `torch::nn::CosineEmbeddingLossOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(CosineEmbeddingLoss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SmoothL1Loss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Creates a criterion that uses a squared term if the absolute
+/// element-wise error falls below beta and an L1 term otherwise.
+/// It is less sensitive to outliers than the `MSELoss` and in some cases
+/// prevents exploding gradients (e.g. see the paper `Fast R-CNN` by Ross
+/// Girshick). See https://pytorch.org/docs/master/nn.html#torch.nn.SmoothL1Loss
+/// to learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn
+/// what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5));
+/// ```
+struct TORCH_API SmoothL1LossImpl : public Cloneable<SmoothL1LossImpl> {
+  explicit SmoothL1LossImpl(SmoothL1LossOptions options = {});
+
+  void reset() override;
+
+  /// Pretty prints the `SmoothL1Loss` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  Tensor forward(const Tensor& input, const Tensor& target);
+
+  /// The options with which this `Module` was constructed.
+  SmoothL1LossOptions options;
+};
+
+/// A `ModuleHolder` subclass for `SmoothL1LossImpl`.
+/// See the documentation for `SmoothL1LossImpl` class to learn what methods it
+/// provides, and examples of how to use `SmoothL1Loss` with
+/// `torch::nn::SmoothL1LossOptions`. See the documentation for `ModuleHolder`
+/// to learn about PyTorch's module storage semantics.
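+///
+/// A usage sketch (illustrative; shapes and the `beta` value are arbitrary
+/// assumptions):
+/// ```
+/// SmoothL1Loss criterion(SmoothL1LossOptions().beta(1.0));
+/// auto loss = criterion(torch::randn({4, 8}), torch::randn({4, 8}));
+/// // scalar loss under the default mean reduction
+/// ```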
+TORCH_MODULE(SmoothL1Loss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HuberLoss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Creates a criterion that uses a squared term if the absolute
+/// element-wise error falls below delta and a delta-scaled L1 term otherwise.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.HuberLoss to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::HuberLossOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5));
+/// ```
+struct TORCH_API HuberLossImpl : public Cloneable<HuberLossImpl> {
+  explicit HuberLossImpl(HuberLossOptions options_ = {});
+
+  void reset() override;
+
+  /// Pretty prints the `HuberLoss` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  Tensor forward(const Tensor& input, const Tensor& target);
+
+  /// The options with which this `Module` was constructed.
+  HuberLossOptions options;
+};
+
+/// A `ModuleHolder` subclass for `HuberLossImpl`.
+/// See the documentation for `HuberLossImpl` class to learn what methods it
+/// provides, and examples of how to use `HuberLoss` with
+/// `torch::nn::HuberLossOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(HuberLoss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiLabelMarginLoss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Creates a criterion that optimizes a multi-class multi-classification
+/// hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch
+/// `Tensor`) and output :math:`y` (which is a 2D `Tensor` of target class
+/// indices). See
+/// https://pytorch.org/docs/master/nn.html#torch.nn.MultiLabelMarginLoss to
+/// learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to
+/// learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone));
+/// ```
+struct TORCH_API MultiLabelMarginLossImpl
+    : public Cloneable<MultiLabelMarginLossImpl> {
+  explicit MultiLabelMarginLossImpl(MultiLabelMarginLossOptions options_ = {});
+
+  void reset() override;
+
+  /// Pretty prints the `MultiLabelMarginLoss` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  Tensor forward(const Tensor& input, const Tensor& target);
+
+  /// The options with which this `Module` was constructed.
+  MultiLabelMarginLossOptions options;
+};
+
+/// A `ModuleHolder` subclass for `MultiLabelMarginLossImpl`.
+/// See the documentation for `MultiLabelMarginLossImpl` class to learn what
+/// methods it provides, and examples of how to use `MultiLabelMarginLoss` with
+/// `torch::nn::MultiLabelMarginLossOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(MultiLabelMarginLoss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SoftMarginLoss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Creates a criterion that optimizes a two-class classification
+/// logistic loss between input tensor :math:`x` and target tensor :math:`y`
+/// (containing 1 or -1).
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.SoftMarginLoss to learn
+/// about the exact behavior of this module.
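+///
+/// A usage sketch (illustrative; shapes are arbitrary assumptions, and the
+/// targets are drawn as +/-1 as the criterion expects):
+/// ```
+/// SoftMarginLoss criterion;
+/// auto input = torch::randn({4, 6});
+/// auto target = torch::sign(torch::randn({4, 6})); // entries in {1, -1}
+/// auto loss = criterion(input, target);
+/// ```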
+///
+/// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn
+/// what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone));
+/// ```
+struct TORCH_API SoftMarginLossImpl : public Cloneable<SoftMarginLossImpl> {
+  explicit SoftMarginLossImpl(SoftMarginLossOptions options_ = {});
+
+  /// Pretty prints the `SoftMarginLoss` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  void reset() override;
+
+  Tensor forward(const Tensor& input, const Tensor& target);
+
+  /// The options with which this `Module` was constructed.
+  SoftMarginLossOptions options;
+};
+
+/// A `ModuleHolder` subclass for `SoftMarginLossImpl`.
+/// See the documentation for `SoftMarginLossImpl` class to learn what methods
+/// it provides, and examples of how to use `SoftMarginLoss` with
+/// `torch::nn::SoftMarginLossOptions`. See the documentation for `ModuleHolder`
+/// to learn about PyTorch's module storage semantics.
+TORCH_MODULE(SoftMarginLoss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiLabelSoftMarginLoss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Creates a criterion that optimizes a multi-label one-versus-all
+/// loss based on max-entropy, between input :math:`x` and target :math:`y` of
+/// size :math:`(N, C)`. See
+/// https://pytorch.org/docs/master/nn.html#torch.nn.MultiLabelSoftMarginLoss to
+/// learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class
+/// to learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// MultiLabelSoftMarginLoss
+/// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight));
+/// ```
+struct TORCH_API MultiLabelSoftMarginLossImpl
+    : public Cloneable<MultiLabelSoftMarginLossImpl> {
+  explicit MultiLabelSoftMarginLossImpl(
+      MultiLabelSoftMarginLossOptions options_ = {});
+
+  /// Pretty prints the `MultiLabelSoftMarginLoss` module into the given
+  /// `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  void reset() override;
+
+  Tensor forward(const Tensor& input, const Tensor& target);
+
+  /// The options with which this `Module` was constructed.
+  MultiLabelSoftMarginLossOptions options;
+};
+
+/// A `ModuleHolder` subclass for `MultiLabelSoftMarginLossImpl`.
+/// See the documentation for `MultiLabelSoftMarginLossImpl` class to learn what
+/// methods it provides, and examples of how to use `MultiLabelSoftMarginLoss`
+/// with `torch::nn::MultiLabelSoftMarginLossOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(MultiLabelSoftMarginLoss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TripletMarginLoss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Creates a criterion that measures the triplet loss given input
+/// tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater
+/// than :math:`0`. This is used for measuring a relative similarity between
+/// samples. A triplet is composed of `a`, `p` and `n` (i.e., `anchor`,
+/// `positive examples` and `negative examples` respectively). The
+/// shapes of all input tensors should be :math:`(N, D)`.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.TripletMarginLoss to
+/// learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::TripletMarginLossOptions` class to
+/// learn what constructor arguments are supported for this module.
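+///
+/// A shape-level usage sketch (illustrative; the batch size and embedding
+/// dimension are arbitrary assumptions):
+/// ```
+/// TripletMarginLoss criterion(TripletMarginLossOptions().margin(1.0));
+/// auto anchor = torch::randn({16, 128});
+/// auto positive = torch::randn({16, 128});
+/// auto negative = torch::randn({16, 128});
+/// auto loss = criterion(anchor, positive, negative);
+/// ```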
+/// +/// Example: +/// ``` +/// TripletMarginLoss +/// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false)); +/// ``` +struct TORCH_API TripletMarginLossImpl + : public Cloneable { + explicit TripletMarginLossImpl(TripletMarginLossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `TripletMarginLoss` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative); + + /// The options with which this `Module` was constructed. + TripletMarginLossOptions options; +}; + +/// A `ModuleHolder` subclass for `TripletMarginLossImpl`. +/// See the documentation for `TripletMarginLossImpl` class to learn what +/// methods it provides, and examples of how to use `TripletMarginLoss` with +/// `torch::nn::TripletMarginLossOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(TripletMarginLoss); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TripletMarginWithDistanceLoss +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Creates a criterion that measures the triplet loss given input +/// tensors :math:`a`, :math:`p`, and :math:`n` (representing anchor, +/// positive, and negative examples, respectively); and a nonnegative, +/// real-valued function +/// ("distance function") used to compute the relationships between the anchor +/// and positive example ("positive distance") and the anchor and negative +/// example ("negative distance"). +/// See +/// https://pytorch.org/docs/master/nn.html#torch.nn.TripletMarginWithDistanceLoss +/// to learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions` +/// class to learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// TripletMarginWithDistanceLoss +/// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false)); +/// ``` +struct TORCH_API TripletMarginWithDistanceLossImpl + : public Cloneable { + explicit TripletMarginWithDistanceLossImpl( + TripletMarginWithDistanceLossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `TripletMarginWithDistanceLoss` module into the given + /// `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative); + + /// The options with which this `Module` was constructed. + TripletMarginWithDistanceLossOptions options; +}; + +/// A `ModuleHolder` subclass for `TripletMarginWithDistanceLossImpl`. +/// See the documentation for `TripletMarginWithDistanceLossImpl` class to learn +/// what methods it provides, and examples of how to use +/// `TripletMarginWithDistanceLoss` with +/// `torch::nn::TripletMarginWithDistanceLossOptions`. +/// See the documentation for `ModuleHolder` to learn about PyTorch's +/// module storage semantics. +TORCH_MODULE(TripletMarginWithDistanceLoss); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CTCLoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// The Connectionist Temporal Classification loss. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.CTCLoss to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::CTCLossOptions` class to learn what +/// constructor arguments are supported for this module. 
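+///
+/// A shape-level usage sketch (illustrative; T, N, C and the length values
+/// are arbitrary assumptions; `log_probs` must hold log-probabilities over
+/// the C classes):
+/// ```
+/// CTCLoss criterion(CTCLossOptions().blank(0));
+/// auto log_probs = torch::randn({50, 16, 20}).log_softmax(2);   // (T, N, C)
+/// auto targets = torch::randint(1, 20, {16, 30}, torch::kLong); // (N, S)
+/// auto input_lengths = torch::full({16}, 50, torch::kLong);     // (N)
+/// auto target_lengths = torch::randint(10, 30, {16}, torch::kLong);
+/// auto loss = criterion(log_probs, targets, input_lengths, target_lengths);
+/// ```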
+/// +/// Example: +/// ``` +/// CTCLoss +/// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum)); +/// ``` +struct TORCH_API CTCLossImpl : public Cloneable { + explicit CTCLossImpl(CTCLossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `CTCLoss` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward( + const Tensor& log_probs, + const Tensor& targets, + const Tensor& input_lengths, + const Tensor& target_lengths); + + /// The options with which this `Module` was constructed. + CTCLossOptions options; +}; + +/// A `ModuleHolder` subclass for `CTCLossImpl`. +/// See the documentation for `CTCLossImpl` class to learn what methods it +/// provides, and examples of how to use `CTCLoss` with +/// `torch::nn::CTCLossOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(CTCLoss); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PoissonNLLLoss +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Negative log likelihood loss with Poisson distribution of target. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.PoissonNLLLoss to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// PoissonNLLLoss +/// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum)); +/// ``` +struct TORCH_API PoissonNLLLossImpl : public Cloneable { + explicit PoissonNLLLossImpl(PoissonNLLLossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `PoissonNLLLoss` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& log_input, const Tensor& targets); + + /// The options with which this `Module` was constructed. + PoissonNLLLossOptions options; +}; + +/// A `ModuleHolder` subclass for `PoissonNLLLossImpl`. +/// See the documentation for `PoissonNLLLossImpl` class to learn what methods +/// it provides, and examples of how to use `PoissonNLLLoss` with +/// `torch::nn::PoissonNLLLossOptions`. See the documentation for `ModuleHolder` +/// to learn about PyTorch's module storage semantics. +TORCH_MODULE(PoissonNLLLoss); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MarginRankingLoss +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Creates a criterion that measures the loss given +/// inputs :math:`x1`, :math:`x2`, two 1D mini-batch `Tensors`, +/// and a label 1D mini-batch tensor :math:`y` (containing 1 or -1). +/// See https://pytorch.org/docs/master/nn.html#torch.nn.MarginRankingLoss to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MarginRankingLossOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MarginRankingLoss +/// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum)); +/// ``` +struct TORCH_API MarginRankingLossImpl + : public Cloneable { + explicit MarginRankingLossImpl(MarginRankingLossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `MarginRankingLoss` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward( + const Tensor& input1, + const Tensor& input2, + const Tensor& targets); + + /// The options with which this `Module` was constructed. 
+  MarginRankingLossOptions options;
+};
+
+/// A `ModuleHolder` subclass for `MarginRankingLossImpl`.
+/// See the documentation for `MarginRankingLossImpl` class to learn what
+/// methods it provides, and examples of how to use `MarginRankingLoss` with
+/// `torch::nn::MarginRankingLossOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(MarginRankingLoss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NLLLoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// The negative log likelihood loss. It is useful to train a classification
+/// problem with `C` classes.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.NLLLoss to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::NLLLossOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean));
+/// ```
+struct TORCH_API NLLLossImpl : public Cloneable<NLLLossImpl> {
+  explicit NLLLossImpl(NLLLossOptions options_ = {});
+
+  /// Pretty prints the `NLLLoss` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  void reset() override;
+
+  Tensor forward(const Tensor& input, const Tensor& target);
+
+  /// The options with which this `Module` was constructed.
+  NLLLossOptions options;
+
+  /// A manual rescaling weight given to each class.
+  Tensor weight;
+};
+
+/// A `ModuleHolder` subclass for `NLLLossImpl`.
+/// See the documentation for `NLLLossImpl` class to learn what methods it
+/// provides, and examples of how to use `NLLLoss` with
+/// `torch::nn::NLLLossOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(NLLLoss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CrossEntropyLoss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Creates a criterion that computes cross entropy loss between input and
+/// target. See
+/// https://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::CrossEntropyLossOptions` class to
+/// learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// CrossEntropyLoss
+/// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean));
+/// ```
+struct TORCH_API CrossEntropyLossImpl : public Cloneable<CrossEntropyLossImpl> {
+  explicit CrossEntropyLossImpl(CrossEntropyLossOptions options_ = {});
+
+  void reset() override;
+
+  /// Pretty prints the `CrossEntropyLoss` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  Tensor forward(const Tensor& input, const Tensor& target);
+
+  /// The options with which this `Module` was constructed.
+  CrossEntropyLossOptions options;
+
+  /// A manual rescaling weight given to each class.
+  Tensor weight;
+};
+
+/// A `ModuleHolder` subclass for `CrossEntropyLossImpl`.
+/// See the documentation for `CrossEntropyLossImpl` class to learn what methods
+/// it provides, and examples of how to use `CrossEntropyLoss` with
+/// `torch::nn::CrossEntropyLossOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(CrossEntropyLoss);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BCEWithLogitsLoss
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// This loss combines a `Sigmoid` layer and the `BCELoss` in one single
+/// class.
This version is more numerically stable than using a plain `Sigmoid` +/// followed by a `BCELoss` as, by combining the operations into one layer, +/// we take advantage of the log-sum-exp trick for numerical stability. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.BCEWithLogitsLoss to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// BCEWithLogitsLoss +/// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API BCEWithLogitsLossImpl + : public Cloneable { + explicit BCEWithLogitsLossImpl(BCEWithLogitsLossOptions options_ = {}); + + void reset() override; + + /// Pretty prints the `BCEWithLogitsLoss` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input, const Tensor& target); + + /// The options with which this `Module` was constructed. + BCEWithLogitsLossOptions options; + + /// A manual rescaling weight given to the loss of each batch element. + Tensor weight; + + /// A weight of positive examples. + Tensor pos_weight; +}; + +/// A `ModuleHolder` subclass for `BCEWithLogitsLossImpl`. +/// See the documentation for `BCEWithLogitsLossImpl` class to learn what +/// methods it provides, and examples of how to use `BCEWithLogitsLoss` with +/// `torch::nn::BCEWithLogitsLossOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(BCEWithLogitsLoss); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/normalization.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/normalization.h new file mode 100644 index 0000000000000000000000000000000000000000..2f748ef79d0bc551e0c351a430a3cca1a1746efa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/normalization.h @@ -0,0 +1,198 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LayerNorm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies Layer Normalization over a mini-batch of inputs as described in +/// the paper `Layer Normalization`_ . +/// See https://pytorch.org/docs/master/nn.html#torch.nn.LayerNorm to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::LayerNormOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// LayerNorm model(LayerNormOptions({2, +/// 2}).elementwise_affine(false).eps(2e-5)); +/// ``` +class TORCH_API LayerNormImpl : public torch::nn::Cloneable { + public: + LayerNormImpl(std::vector normalized_shape) + : LayerNormImpl(LayerNormOptions(normalized_shape)) {} + explicit LayerNormImpl(LayerNormOptions options_); + + void reset() override; + + void reset_parameters(); + + /// Pretty prints the `LayerNorm` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// Applies layer normalization over a mini-batch of inputs as described in + /// the paper `Layer Normalization`_ . 
+  ///
+  /// The mean and standard deviation are calculated separately over the last
+  /// dimensions, which have to match the shape specified by
+  /// `normalized_shape`.
+  ///
+  /// `Layer Normalization`: https://arxiv.org/abs/1607.06450
+  Tensor forward(const Tensor& input);
+
+  /// The options with which this module was constructed.
+  LayerNormOptions options;
+
+  /// The learned weight.
+  /// Initialized to ones if the `elementwise_affine` option is set to `true`
+  /// upon construction.
+  Tensor weight;
+
+  /// The learned bias.
+  /// Initialized to zeros if the `elementwise_affine` option is set to `true`
+  /// upon construction.
+  Tensor bias;
+};
+
+/// A `ModuleHolder` subclass for `LayerNormImpl`.
+/// See the documentation for `LayerNormImpl` class to learn what methods it
+/// provides, and examples of how to use `LayerNorm` with
+/// `torch::nn::LayerNormOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(LayerNorm);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LocalResponseNorm
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies local response normalization over an input signal composed
+/// of several input planes, where channels occupy the second dimension.
+/// Applies normalization across channels.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.LocalResponseNorm to
+/// learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::LocalResponseNormOptions` class to
+/// learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// LocalResponseNorm
+/// model(LocalResponseNormOptions(2).alpha(0.0002).beta(0.85).k(2.));
+/// ```
+class TORCH_API LocalResponseNormImpl
+    : public Cloneable<LocalResponseNormImpl> {
+ public:
+  LocalResponseNormImpl(int64_t size)
+      : LocalResponseNormImpl(LocalResponseNormOptions(size)) {}
+  explicit LocalResponseNormImpl(const LocalResponseNormOptions& options_);
+
+  Tensor forward(const Tensor& input);
+
+  void reset() override;
+
+  /// Pretty prints the `LocalResponseNormImpl` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  /// The options with which this `Module` was constructed.
+  LocalResponseNormOptions options;
+};
+
+/// A `ModuleHolder` subclass for `LocalResponseNormImpl`.
+/// See the documentation for `LocalResponseNormImpl` class to learn what
+/// methods it provides, and examples of how to use `LocalResponseNorm` with
+/// `torch::nn::LocalResponseNormOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(LocalResponseNorm);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CrossMapLRN2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// See the documentation for `torch::nn::CrossMapLRN2dOptions` class to learn
+/// what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// CrossMapLRN2d model(CrossMapLRN2dOptions(3).alpha(1e-5).beta(0.1).k(10));
+/// ```
+class TORCH_API CrossMapLRN2dImpl
+    : public torch::nn::Cloneable<CrossMapLRN2dImpl> {
+ public:
+  CrossMapLRN2dImpl(int64_t size)
+      : CrossMapLRN2dImpl(CrossMapLRN2dOptions(size)) {}
+  explicit CrossMapLRN2dImpl(const CrossMapLRN2dOptions& options_)
+      : options(options_) {}
+
+  void reset() override;
+
+  /// Pretty prints the `CrossMapLRN2d` module into the given `stream`.
+ void pretty_print(std::ostream& stream) const override; + + torch::Tensor forward(const torch::Tensor& input); + + CrossMapLRN2dOptions options; +}; + +/// A `ModuleHolder` subclass for `CrossMapLRN2dImpl`. +/// See the documentation for `CrossMapLRN2dImpl` class to learn what methods it +/// provides, and examples of how to use `CrossMapLRN2d` with +/// `torch::nn::CrossMapLRN2dOptions`. See the documentation for `ModuleHolder` +/// to learn about PyTorch's module storage semantics. +TORCH_MODULE(CrossMapLRN2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GroupNorm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies Group Normalization over a mini-batch of inputs as described in +/// the paper `Group Normalization`_ . +/// See https://pytorch.org/docs/master/nn.html#torch.nn.GroupNorm to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::GroupNormOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// GroupNorm model(GroupNormOptions(2, 2).eps(2e-5).affine(false)); +/// ``` +class TORCH_API GroupNormImpl : public torch::nn::Cloneable { + public: + GroupNormImpl(int64_t num_groups, int64_t num_channels) + : GroupNormImpl(GroupNormOptions(num_groups, num_channels)) {} + explicit GroupNormImpl(const GroupNormOptions& options_); + + void reset() override; + + void reset_parameters(); + + /// Pretty prints the `GroupNorm` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input); + + /// The options with which this module was constructed. + GroupNormOptions options; + + /// The learned weight. + Tensor weight; + + /// The learned bias. + Tensor bias; +}; + +/// A `ModuleHolder` subclass for `GroupNormImpl`. +/// See the documentation for `GroupNormImpl` class to learn what methods it +/// provides, and examples of how to use `GroupNorm` with +/// `torch::nn::GroupNormOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(GroupNorm); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/padding.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/padding.h new file mode 100644 index 0000000000000000000000000000000000000000..9a93af0dd1192e7251cbd6ce4207abdab8fbee76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/padding.h @@ -0,0 +1,378 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace nn { + +/// Base class for all (dimension-specialized) ReflectionPad modules. +template +class TORCH_API ReflectionPadImpl : public torch::nn::Cloneable { + public: + ReflectionPadImpl(ExpandingArray padding) + : ReflectionPadImpl(ReflectionPadOptions(padding)) {} + explicit ReflectionPadImpl(const ReflectionPadOptions& options_); + + void reset() override; + + Tensor forward(const Tensor& input); + + /// Pretty prints the `ReflectionPad{1,2}d` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + ReflectionPadOptions options; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReflectionPad1d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies ReflectionPad over a 1-D input. 
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.ReflectionPad1d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::ReflectionPad1dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// ReflectionPad1d model(ReflectionPad1dOptions({3, 1})); +/// ``` +class TORCH_API ReflectionPad1dImpl + : public ReflectionPadImpl<1, ReflectionPad1dImpl> { + public: + using ReflectionPadImpl<1, ReflectionPad1dImpl>::ReflectionPadImpl; +}; + +/// A `ModuleHolder` subclass for `ReflectionPad1dImpl`. +/// See the documentation for `ReflectionPad1dImpl` class to learn what methods +/// it provides, and examples of how to use `ReflectionPad1d` with +/// `torch::nn::ReflectionPad1dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(ReflectionPad1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReflectionPad2d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies ReflectionPad over a 2-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.ReflectionPad2d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::ReflectionPad2dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0})); +/// ``` +class TORCH_API ReflectionPad2dImpl + : public ReflectionPadImpl<2, ReflectionPad2dImpl> { + public: + using ReflectionPadImpl<2, ReflectionPad2dImpl>::ReflectionPadImpl; +}; + +/// A `ModuleHolder` subclass for `ReflectionPad2dImpl`. +/// See the documentation for `ReflectionPad2dImpl` class to learn what methods +/// it provides, and examples of how to use `ReflectionPad2d` with +/// `torch::nn::ReflectionPad2dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(ReflectionPad2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReflectionPad3d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies ReflectionPad over a 3-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.ReflectionPad3d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::ReflectionPad3dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// ReflectionPad3d model(ReflectionPad3dOptions(1)); +/// ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 2})); +/// ``` +class TORCH_API ReflectionPad3dImpl + : public ReflectionPadImpl<3, ReflectionPad3dImpl> { + public: + using ReflectionPadImpl<3, ReflectionPad3dImpl>::ReflectionPadImpl; +}; + +/// A `ModuleHolder` subclass for `ReflectionPad3dImpl`. +/// See the documentation for `ReflectionPad3dImpl` class to learn what methods +/// it provides, and examples of how to use `ReflectionPad3d` with +/// `torch::nn::ReflectionPad3dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(ReflectionPad3d); + +// ============================================================================ + +/// Base class for all (dimension-specialized) ReplicationPad modules. 
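The padding options list one value per edge: `{left, right}` in 1-D and `{left, right, top, bottom}` in 2-D, and reflection requires each pad to be smaller than the corresponding input dimension. A minimal sketch, assuming a linked libtorch:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // 1-D: pad {left, right} = {3, 1} on a [N, C, W] input: W grows from 8 to 12.
  torch::nn::ReflectionPad1d pad1(torch::nn::ReflectionPad1dOptions({3, 1}));
  std::cout << pad1(torch::randn({1, 1, 8})).sizes() << '\n';  // [1, 1, 12]

  // 2-D: pad {left, right, top, bottom} = {1, 1, 2, 0} on a [N, C, H, W] input.
  torch::nn::ReflectionPad2d pad2(torch::nn::ReflectionPad2dOptions({1, 1, 2, 0}));
  std::cout << pad2(torch::randn({1, 1, 4, 4})).sizes() << '\n';  // [1, 1, 6, 6]
}
```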
+template <size_t D, typename Derived>
+class TORCH_API ReplicationPadImpl : public torch::nn::Cloneable<Derived> {
+ public:
+  ReplicationPadImpl(ExpandingArray<D * 2> padding)
+      : ReplicationPadImpl(ReplicationPadOptions<D>(padding)) {}
+  explicit ReplicationPadImpl(const ReplicationPadOptions<D>& options_);
+
+  void reset() override;
+
+  Tensor forward(const Tensor& input);
+
+  /// Pretty prints the `ReplicationPad{1,2,3}d` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  /// The options with which this `Module` was constructed.
+  ReplicationPadOptions<D> options;
+};
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReplicationPad1d
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies ReplicationPad over a 1-D input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.ReplicationPad1d to
+/// learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::ReplicationPad1dOptions` class to
+/// learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// ReplicationPad1d model(ReplicationPad1dOptions({3, 1}));
+/// ```
+class TORCH_API ReplicationPad1dImpl
+    : public ReplicationPadImpl<1, ReplicationPad1dImpl> {
+ public:
+  using ReplicationPadImpl<1, ReplicationPad1dImpl>::ReplicationPadImpl;
+};
+
+/// A `ModuleHolder` subclass for `ReplicationPad1dImpl`.
+/// See the documentation for `ReplicationPad1dImpl` class to learn what methods
+/// it provides, and examples of how to use `ReplicationPad1d` with
+/// `torch::nn::ReplicationPad1dOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(ReplicationPad1d);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReplicationPad2d
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies ReplicationPad over a 2-D input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.ReplicationPad2d to
+/// learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::ReplicationPad2dOptions` class to
+/// learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0}));
+/// ```
+class TORCH_API ReplicationPad2dImpl
+    : public ReplicationPadImpl<2, ReplicationPad2dImpl> {
+ public:
+  using ReplicationPadImpl<2, ReplicationPad2dImpl>::ReplicationPadImpl;
+};
+
+/// A `ModuleHolder` subclass for `ReplicationPad2dImpl`.
+/// See the documentation for `ReplicationPad2dImpl` class to learn what methods
+/// it provides, and examples of how to use `ReplicationPad2d` with
+/// `torch::nn::ReplicationPad2dOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(ReplicationPad2d);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReplicationPad3d
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies ReplicationPad over a 3-D input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.ReplicationPad3d to
+/// learn about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::ReplicationPad3dOptions` class to
+/// learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
+/// ```
+class TORCH_API ReplicationPad3dImpl
+    : public ReplicationPadImpl<3, ReplicationPad3dImpl> {
+ public:
+  using ReplicationPadImpl<3, ReplicationPad3dImpl>::ReplicationPadImpl;
+};
+
+/// A `ModuleHolder` subclass for `ReplicationPad3dImpl`.
+/// See the documentation for `ReplicationPad3dImpl` class to learn what methods
+/// it provides, and examples of how to use `ReplicationPad3d` with
+/// `torch::nn::ReplicationPad3dOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(ReplicationPad3d);
+
+// ============================================================================
+
+/// Base class for all (dimension-specialized) ZeroPad modules.
+template <size_t D, typename Derived>
+class TORCH_API ZeroPadImpl : public torch::nn::Cloneable<Derived> {
+ public:
+  ZeroPadImpl(ExpandingArray<D * 2> padding)
+      : ZeroPadImpl(ZeroPadOptions<D>(padding)) {}
+  explicit ZeroPadImpl(const ZeroPadOptions<D>& options_);
+
+  void reset() override;
+
+  Tensor forward(const Tensor& input);
+
+  /// Pretty prints the `ZeroPad{1,2,3}d` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  /// The options with which this `Module` was constructed.
+  ZeroPadOptions<D> options;
+};
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies ZeroPad over a 1-D input.
+class TORCH_API ZeroPad1dImpl : public ZeroPadImpl<1, ZeroPad1dImpl> {
+ public:
+  using ZeroPadImpl<1, ZeroPad1dImpl>::ZeroPadImpl;
+};
+
+/// A `ModuleHolder` subclass for `ZeroPad1dImpl`.
+/// See the documentation for `ZeroPad1dImpl` class to learn what methods it
+/// provides, and examples of how to use `ZeroPad1d` with
+/// `torch::nn::ZeroPad1dOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(ZeroPad1d);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies ZeroPad over a 2-D input.
+class TORCH_API ZeroPad2dImpl : public ZeroPadImpl<2, ZeroPad2dImpl> {
+ public:
+  using ZeroPadImpl<2, ZeroPad2dImpl>::ZeroPadImpl;
+};
+
+/// A `ModuleHolder` subclass for `ZeroPad2dImpl`.
+/// See the documentation for `ZeroPad2dImpl` class to learn what methods it
+/// provides, and examples of how to use `ZeroPad2d` with
+/// `torch::nn::ZeroPad2dOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(ZeroPad2d);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies ZeroPad over a 3-D input.
+class TORCH_API ZeroPad3dImpl : public ZeroPadImpl<3, ZeroPad3dImpl> {
+ public:
+  using ZeroPadImpl<3, ZeroPad3dImpl>::ZeroPadImpl;
+};
+
+/// A `ModuleHolder` subclass for `ZeroPad3dImpl`.
+/// See the documentation for `ZeroPad3dImpl` class to learn what methods it
+/// provides, and examples of how to use `ZeroPad3d` with
+/// `torch::nn::ZeroPad3dOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(ZeroPad3d);
+
+// ============================================================================
+
+/// Base class for all (dimension-specialized) ConstantPad modules.
+template <size_t D, typename Derived>
+class TORCH_API ConstantPadImpl : public torch::nn::Cloneable<Derived> {
+ public:
+  ConstantPadImpl(ExpandingArray<D * 2> padding, double value)
+      : ConstantPadImpl(ConstantPadOptions<D>(padding, value)) {}
+  explicit ConstantPadImpl(const ConstantPadOptions<D>& options_);
+
+  void reset() override;
+
+  Tensor forward(const Tensor& input);
+
+  /// Pretty prints the `ConstantPad{1,2,3}d` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  /// The options with which this `Module` was constructed.
+  ConstantPadOptions<D> options;
+};
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies ConstantPad over a 1-D input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.ConstantPad1d to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::ConstantPad1dOptions` class to learn
+/// what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5));
+/// ```
+class TORCH_API ConstantPad1dImpl
+    : public ConstantPadImpl<1, ConstantPad1dImpl> {
+ public:
+  using ConstantPadImpl<1, ConstantPad1dImpl>::ConstantPadImpl;
+};
+
+/// A `ModuleHolder` subclass for `ConstantPad1dImpl`.
+/// See the documentation for `ConstantPad1dImpl` class to learn what methods it
+/// provides, and examples of how to use `ConstantPad1d` with
+/// `torch::nn::ConstantPad1dOptions`. See the documentation for `ModuleHolder`
+/// to learn about PyTorch's module storage semantics.
+TORCH_MODULE(ConstantPad1d);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies ConstantPad over a 2-D input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.ConstantPad2d to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::ConstantPad2dOptions` class to learn
+/// what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5));
+/// ```
+class TORCH_API ConstantPad2dImpl
+    : public ConstantPadImpl<2, ConstantPad2dImpl> {
+ public:
+  using ConstantPadImpl<2, ConstantPad2dImpl>::ConstantPadImpl;
+};
+
+/// A `ModuleHolder` subclass for `ConstantPad2dImpl`.
+/// See the documentation for `ConstantPad2dImpl` class to learn what methods it
+/// provides, and examples of how to use `ConstantPad2d` with
+/// `torch::nn::ConstantPad2dOptions`. See the documentation for `ModuleHolder`
+/// to learn about PyTorch's module storage semantics.
+TORCH_MODULE(ConstantPad2d);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies ConstantPad over a 3-D input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.ConstantPad3d to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::ConstantPad3dOptions` class to learn
+/// what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5));
+/// ```
+class TORCH_API ConstantPad3dImpl
+    : public ConstantPadImpl<3, ConstantPad3dImpl> {
+ public:
+  using ConstantPadImpl<3, ConstantPad3dImpl>::ConstantPadImpl;
+};
+
+/// A `ModuleHolder` subclass for `ConstantPad3dImpl`.
+/// See the documentation for `ConstantPad3dImpl` class to learn what methods it
+/// provides, and examples of how to use `ConstantPad3d` with
+/// `torch::nn::ConstantPad3dOptions`. See the documentation for `ModuleHolder`
+/// to learn about PyTorch's module storage semantics.
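ZeroPad and ConstantPad share the padding convention of the reflection and replication modules; ConstantPad additionally takes the fill value as the second options argument. A minimal sketch, assuming a linked libtorch:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::ZeroPad2d zero(torch::nn::ZeroPad2dOptions({1, 1, 2, 0}));
  torch::nn::ConstantPad2d fill(torch::nn::ConstantPad2dOptions({1, 1, 2, 0}, 3.5));

  auto x = torch::ones({1, 1, 3, 3});
  // Both grow H from 3 to 5 and W from 3 to 5; they differ only in fill value.
  std::cout << zero(x).sizes() << '\n';                     // [1, 1, 5, 5]
  std::cout << fill(x)[0][0][0][0].item<double>() << '\n';  // 3.5 (padded row)
}
```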
+TORCH_MODULE(ConstantPad3d); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/pixelshuffle.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/pixelshuffle.h new file mode 100644 index 0000000000000000000000000000000000000000..e47e6851910529698150220f8c1b73c6e1945982 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/pixelshuffle.h @@ -0,0 +1,88 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace nn { + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PixelShuffle +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)` +/// to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an +/// upscale factor. See +/// https://pytorch.org/docs/master/nn.html#torch.nn.PixelShuffle to learn about +/// the exact behavior of this module. +/// +/// See the documentation for `torch::nn::PixelShuffleOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// PixelShuffle model(PixelShuffleOptions(5)); +/// ``` +struct TORCH_API PixelShuffleImpl + : public torch::nn::Cloneable { + explicit PixelShuffleImpl(const PixelShuffleOptions& options_); + + /// Pretty prints the `PixelShuffle` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input); + + void reset() override; + + /// The options with which this `Module` was constructed. + PixelShuffleOptions options; +}; + +/// A `ModuleHolder` subclass for `PixelShuffleImpl`. +/// See the documentation for `PixelShuffleImpl` class to learn what methods it +/// provides, and examples of how to use `PixelShuffle` with +/// `torch::nn::PixelShuffleOptions`. See the documentation for `ModuleHolder` +/// to learn about PyTorch's module storage semantics. +TORCH_MODULE(PixelShuffle); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PixelUnshuffle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Reverses the PixelShuffle operation by rearranging elements in a tensor of +/// shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape :math:`(*, +/// C \times r^2, H, W)`, where r is a downscale factor. See +/// https://pytorch.org/docs/master/nn.html#torch.nn.PixelUnshuffle to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::PixelUnshuffleOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// PixelUnshuffle model(PixelUnshuffleOptions(5)); +/// ``` +struct TORCH_API PixelUnshuffleImpl + : public torch::nn::Cloneable { + explicit PixelUnshuffleImpl(const PixelUnshuffleOptions& options_); + + /// Pretty prints the `PixelUnshuffle` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input); + + void reset() override; + + /// The options with which this `Module` was constructed. + PixelUnshuffleOptions options; +}; + +/// A `ModuleHolder` subclass for `PixelUnshuffleImpl`. +/// See the documentation for `PixelUnshuffleImpl` class to learn what methods +/// it provides, and examples of how to use `PixelUnshuffle` with +/// `torch::nn::PixelUnshuffleOptions`. See the documentation for `ModuleHolder` +/// to learn about PyTorch's module storage semantics. 
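`PixelShuffle` and `PixelUnshuffle` are exact inverses for matching factors, which a quick shape round-trip makes concrete. A minimal sketch, assuming a linked libtorch:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // r = 2: [N, C*r^2, H, W] -> [N, C, H*r, W*r], then back again.
  torch::nn::PixelShuffle shuffle(torch::nn::PixelShuffleOptions(2));
  torch::nn::PixelUnshuffle unshuffle(torch::nn::PixelUnshuffleOptions(2));

  auto x = torch::randn({1, 8, 4, 4});
  auto up = shuffle(x);        // [1, 2, 8, 8]
  auto back = unshuffle(up);   // [1, 8, 4, 4]
  std::cout << up.sizes() << ' ' << torch::equal(x, back) << '\n';  // ... 1
}
```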
+TORCH_MODULE(PixelUnshuffle); + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/pooling.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/pooling.h new file mode 100644 index 0000000000000000000000000000000000000000..e7df9b1fe7f37310a0f6315b156a5a2251bae5bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/pooling.h @@ -0,0 +1,779 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace nn { + +/// Base class for all (dimension-specialized) avgpool modules. +template +class TORCH_API AvgPoolImpl : public torch::nn::Cloneable { + public: + AvgPoolImpl(ExpandingArray kernel_size) + : AvgPoolImpl(AvgPoolOptions(kernel_size)) {} + explicit AvgPoolImpl(const AvgPoolOptions& options_); + + void reset() override; + + /// Pretty prints the `AvgPool{1,2,3}d` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + AvgPoolOptions options; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AvgPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies avgpool over a 1-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AvgPool1d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::AvgPool1dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AvgPool1d model(AvgPool1dOptions(3).stride(2)); +/// ``` +class TORCH_API AvgPool1dImpl : public AvgPoolImpl<1, AvgPool1dImpl> { + public: + using AvgPoolImpl<1, AvgPool1dImpl>::AvgPoolImpl; + Tensor forward(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `AvgPool1dImpl`. +/// See the documentation for `AvgPool1dImpl` class to learn what methods it +/// provides, and examples of how to use `AvgPool1d` with +/// `torch::nn::AvgPool1dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(AvgPool1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AvgPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies avgpool over a 2-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AvgPool2d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::AvgPool2dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2})); +/// ``` +class TORCH_API AvgPool2dImpl : public AvgPoolImpl<2, AvgPool2dImpl> { + public: + using AvgPoolImpl<2, AvgPool2dImpl>::AvgPoolImpl; + Tensor forward(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `AvgPool2dImpl`. +/// See the documentation for `AvgPool2dImpl` class to learn what methods it +/// provides, and examples of how to use `AvgPool2d` with +/// `torch::nn::AvgPool2dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(AvgPool2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AvgPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies avgpool over a 3-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AvgPool3d to learn +/// about the exact behavior of this module. 
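For the non-adaptive pooling modules the output extent follows the usual `(in - kernel) / stride + 1` formula (before padding and `ceil_mode` adjustments). A minimal sketch, assuming a linked libtorch:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // kernel {3, 2}, stride {2, 2}: H = (7-3)/2+1 = 3, W = (6-2)/2+1 = 3.
  torch::nn::AvgPool2d pool(torch::nn::AvgPool2dOptions({3, 2}).stride({2, 2}));
  std::cout << pool(torch::randn({1, 1, 7, 6})).sizes() << '\n';  // [1, 1, 3, 3]
}
```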
+/// +/// See the documentation for `torch::nn::AvgPool3dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AvgPool3d model(AvgPool3dOptions(5).stride(2)); +/// ``` +class TORCH_API AvgPool3dImpl : public AvgPoolImpl<3, AvgPool3dImpl> { + public: + using AvgPoolImpl<3, AvgPool3dImpl>::AvgPoolImpl; + Tensor forward(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `AvgPool3dImpl`. +/// See the documentation for `AvgPool3dImpl` class to learn what methods it +/// provides, and examples of how to use `AvgPool3d` with +/// `torch::nn::AvgPool3dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(AvgPool3d); + +// ============================================================================ + +/// Base class for all (dimension-specialized) maxpool modules. +template +class TORCH_API MaxPoolImpl : public torch::nn::Cloneable { + public: + MaxPoolImpl(ExpandingArray kernel_size) + : MaxPoolImpl(MaxPoolOptions(kernel_size)) {} + explicit MaxPoolImpl(const MaxPoolOptions& options_); + + void reset() override; + + /// Pretty prints the `MaxPool{1,2,3}d` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + MaxPoolOptions options; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies maxpool over a 1-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.MaxPool1d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MaxPool1dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MaxPool1d model(MaxPool1dOptions(3).stride(2)); +/// ``` +class TORCH_API MaxPool1dImpl : public MaxPoolImpl<1, MaxPool1dImpl> { + public: + using MaxPoolImpl<1, MaxPool1dImpl>::MaxPoolImpl; + Tensor forward(const Tensor& input); + + /// Returns the outputs and the indices of the max values. + /// Useful for `torch::nn::MaxUnpool1d` later. + std::tuple forward_with_indices(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `MaxPool1dImpl`. +/// See the documentation for `MaxPool1dImpl` class to learn what methods it +/// provides, and examples of how to use `MaxPool1d` with +/// `torch::nn::MaxPool1dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(MaxPool1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies maxpool over a 2-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.MaxPool2d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MaxPool2dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2})); +/// ``` +class TORCH_API MaxPool2dImpl : public MaxPoolImpl<2, MaxPool2dImpl> { + public: + using MaxPoolImpl<2, MaxPool2dImpl>::MaxPoolImpl; + Tensor forward(const Tensor& input); + + /// Returns the outputs and the indices of the max values. + /// Useful for `torch::nn::MaxUnpool2d` later. + std::tuple forward_with_indices(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `MaxPool2dImpl`. 
+/// See the documentation for `MaxPool2dImpl` class to learn what methods it +/// provides, and examples of how to use `MaxPool2d` with +/// `torch::nn::MaxPool2dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(MaxPool2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies maxpool over a 3-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.MaxPool3d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MaxPool3dOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MaxPool3d model(MaxPool3dOptions(3).stride(2)); +/// ``` +class TORCH_API MaxPool3dImpl : public MaxPoolImpl<3, MaxPool3dImpl> { + public: + using MaxPoolImpl<3, MaxPool3dImpl>::MaxPoolImpl; + Tensor forward(const Tensor& input); + + /// Returns the outputs and the indices of the max values. + /// Useful for `torch::nn::MaxUnpool3d` later. + std::tuple forward_with_indices(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `MaxPool3dImpl`. +/// See the documentation for `MaxPool3dImpl` class to learn what methods it +/// provides, and examples of how to use `MaxPool3d` with +/// `torch::nn::MaxPool3dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(MaxPool3d); + +// ============================================================================ + +/// Base class for all (dimension-specialized) adaptive maxpool modules. +template +class TORCH_API AdaptiveMaxPoolImpl : public torch::nn::Cloneable { + public: + AdaptiveMaxPoolImpl(output_size_t output_size) + : AdaptiveMaxPoolImpl( + AdaptiveMaxPoolOptions(output_size)) {} + explicit AdaptiveMaxPoolImpl( + const AdaptiveMaxPoolOptions& options_) + : options(options_) {} + + void reset() override{}; + + /// Pretty prints the `AdaptiveMaxPool{1,2,3}d` module into the given + /// `stream`. + void pretty_print(std::ostream& stream) const override { + stream << "torch::nn::AdaptiveMaxPool" << D << "d" + << "(output_size=" << options.output_size() << ")"; + } + + /// The options with which this `Module` was constructed. + AdaptiveMaxPoolOptions options; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveMaxPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies adaptive maxpool over a 1-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveMaxPool1d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::AdaptiveMaxPool1dOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AdaptiveMaxPool1d model(AdaptiveMaxPool1dOptions(3)); +/// ``` +class TORCH_API AdaptiveMaxPool1dImpl + : public AdaptiveMaxPoolImpl<1, ExpandingArray<1>, AdaptiveMaxPool1dImpl> { + public: + using AdaptiveMaxPoolImpl<1, ExpandingArray<1>, AdaptiveMaxPool1dImpl>:: + AdaptiveMaxPoolImpl; + + Tensor forward(const Tensor& input); + + /// Returns the indices along with the outputs. + /// Useful to pass to nn.MaxUnpool1d. + std::tuple forward_with_indices(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `AdaptiveMaxPool1dImpl`. +/// See the documentation for `AdaptiveMaxPool1dImpl` class to learn what +/// methods it provides, and examples of how to use `AdaptiveMaxPool1d` with +/// `torch::nn::AdaptiveMaxPool1dOptions`. 
See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(AdaptiveMaxPool1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveMaxPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies adaptive maxpool over a 2-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveMaxPool2d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::AdaptiveMaxPool2dOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2})); +/// ``` +class TORCH_API AdaptiveMaxPool2dImpl : public AdaptiveMaxPoolImpl< + 2, + ExpandingArrayWithOptionalElem<2>, + AdaptiveMaxPool2dImpl> { + public: + using AdaptiveMaxPoolImpl< + 2, + ExpandingArrayWithOptionalElem<2>, + AdaptiveMaxPool2dImpl>::AdaptiveMaxPoolImpl; + + Tensor forward(const Tensor& input); + + /// Returns the indices along with the outputs. + /// Useful to pass to nn.MaxUnpool2d. + std::tuple forward_with_indices(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `AdaptiveMaxPool2dImpl`. +/// See the documentation for `AdaptiveMaxPool2dImpl` class to learn what +/// methods it provides, and examples of how to use `AdaptiveMaxPool2d` with +/// `torch::nn::AdaptiveMaxPool2dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(AdaptiveMaxPool2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveMaxPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies adaptive maxpool over a 3-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveMaxPool3d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::AdaptiveMaxPool3dOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AdaptiveMaxPool3d model(AdaptiveMaxPool3dOptions(3)); +/// ``` +class TORCH_API AdaptiveMaxPool3dImpl : public AdaptiveMaxPoolImpl< + 3, + ExpandingArrayWithOptionalElem<3>, + AdaptiveMaxPool3dImpl> { + public: + using AdaptiveMaxPoolImpl< + 3, + ExpandingArrayWithOptionalElem<3>, + AdaptiveMaxPool3dImpl>::AdaptiveMaxPoolImpl; + + Tensor forward(const Tensor& input); + + /// Returns the indices along with the outputs. + /// Useful to pass to nn.MaxUnpool3d. + std::tuple forward_with_indices(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `AdaptiveMaxPool3dImpl`. +/// See the documentation for `AdaptiveMaxPool3dImpl` class to learn what +/// methods it provides, and examples of how to use `AdaptiveMaxPool3d` with +/// `torch::nn::AdaptiveMaxPool3dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(AdaptiveMaxPool3d); + +// ============================================================================ + +/// Base class for all (dimension-specialized) adaptive avgpool modules. +template +class TORCH_API AdaptiveAvgPoolImpl : public torch::nn::Cloneable { + public: + AdaptiveAvgPoolImpl(output_size_t output_size) + : AdaptiveAvgPoolImpl( + AdaptiveAvgPoolOptions(output_size)) {} + explicit AdaptiveAvgPoolImpl( + const AdaptiveAvgPoolOptions& options_) + : options(options_) {} + + void reset() override {} + + /// Pretty prints the `AdaptiveAvgPool{1,2,3}d` module into the given + /// `stream`. 
+ void pretty_print(std::ostream& stream) const override { + stream << "torch::nn::AdaptiveAvgPool" << D << "d" + << "(output_size=" << options.output_size() << ")"; + } + + /// The options with which this `Module` was constructed. + AdaptiveAvgPoolOptions options; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveAvgPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies adaptive avgpool over a 1-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveAvgPool1d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::AdaptiveAvgPool1dOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AdaptiveAvgPool1d model(AdaptiveAvgPool1dOptions(5)); +/// ``` +class TORCH_API AdaptiveAvgPool1dImpl + : public AdaptiveAvgPoolImpl<1, ExpandingArray<1>, AdaptiveAvgPool1dImpl> { + public: + using AdaptiveAvgPoolImpl<1, ExpandingArray<1>, AdaptiveAvgPool1dImpl>:: + AdaptiveAvgPoolImpl; + + Tensor forward(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `AdaptiveAvgPool1dImpl`. +/// See the documentation for `AdaptiveAvgPool1dImpl` class to learn what +/// methods it provides, and examples of how to use `AdaptiveAvgPool1d` with +/// `torch::nn::AdaptiveAvgPool1dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(AdaptiveAvgPool1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveAvgPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies adaptive avgpool over a 2-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveAvgPool2d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::AdaptiveAvgPool2dOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2})); +/// ``` +class TORCH_API AdaptiveAvgPool2dImpl : public AdaptiveAvgPoolImpl< + 2, + ExpandingArrayWithOptionalElem<2>, + AdaptiveAvgPool2dImpl> { + public: + using AdaptiveAvgPoolImpl< + 2, + ExpandingArrayWithOptionalElem<2>, + AdaptiveAvgPool2dImpl>::AdaptiveAvgPoolImpl; + + Tensor forward(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `AdaptiveAvgPool2dImpl`. +/// See the documentation for `AdaptiveAvgPool2dImpl` class to learn what +/// methods it provides, and examples of how to use `AdaptiveAvgPool2d` with +/// `torch::nn::AdaptiveAvgPool2dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(AdaptiveAvgPool2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveAvgPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies adaptive avgpool over a 3-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveAvgPool3d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::AdaptiveAvgPool3dOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// AdaptiveAvgPool3d model(AdaptiveAvgPool3dOptions(3)); +/// ``` +class TORCH_API AdaptiveAvgPool3dImpl : public AdaptiveAvgPoolImpl< + 3, + ExpandingArrayWithOptionalElem<3>, + AdaptiveAvgPool3dImpl> { + public: + using AdaptiveAvgPoolImpl< + 3, + ExpandingArrayWithOptionalElem<3>, + AdaptiveAvgPool3dImpl>::AdaptiveAvgPoolImpl; + + Tensor forward(const Tensor& input); +}; + +/// A `ModuleHolder` subclass for `AdaptiveAvgPool3dImpl`. 
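Adaptive pooling specifies the output size rather than the kernel; the window is derived from the input, so any spatial resolution collapses to the requested shape (for example `{1, 1}` for global average pooling). A minimal sketch, assuming a linked libtorch:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // Global average pooling: any [N, C, H, W] input becomes [N, C, 1, 1].
  torch::nn::AdaptiveAvgPool2d gap(torch::nn::AdaptiveAvgPool2dOptions({1, 1}));
  std::cout << gap(torch::randn({8, 64, 13, 17})).sizes() << '\n';  // [8, 64, 1, 1]
}
```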
+/// See the documentation for `AdaptiveAvgPool3dImpl` class to learn what +/// methods it provides, and examples of how to use `AdaptiveAvgPool3d` with +/// `torch::nn::AdaptiveAvgPool3dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(AdaptiveAvgPool3d); + +// ============================================================================ + +/// Base class for all (dimension-specialized) maxunpool modules. +template +class TORCH_API MaxUnpoolImpl : public torch::nn::Cloneable { + public: + MaxUnpoolImpl(ExpandingArray kernel_size) + : MaxUnpoolImpl(MaxUnpoolOptions(kernel_size)) {} + explicit MaxUnpoolImpl(const MaxUnpoolOptions& options_); + + void reset() override; + + /// Pretty prints the `MaxUnpool{1,2,3}d` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + /// The options with which this `Module` was constructed. + MaxUnpoolOptions options; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxUnpool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies maxunpool over a 1-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.MaxUnpool1d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MaxUnpool1dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MaxUnpool1d model(MaxUnpool1dOptions(3).stride(2).padding(1)); +/// ``` +class TORCH_API MaxUnpool1dImpl : public MaxUnpoolImpl<1, MaxUnpool1dImpl> { + public: + using MaxUnpoolImpl<1, MaxUnpool1dImpl>::MaxUnpoolImpl; + Tensor forward( + const Tensor& input, + const Tensor& indices, + const c10::optional>& output_size = c10::nullopt); + + protected: + FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(c10::optional>())}) +}; + +/// A `ModuleHolder` subclass for `MaxUnpool1dImpl`. +/// See the documentation for `MaxUnpool1dImpl` class to learn what methods it +/// provides, and examples of how to use `MaxUnpool1d` with +/// `torch::nn::MaxUnpool1dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(MaxUnpool1d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxUnpool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies maxunpool over a 2-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.MaxUnpool2d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MaxUnpool2dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MaxUnpool2d model(MaxUnpool2dOptions(3).stride(2).padding(1)); +/// ``` +class TORCH_API MaxUnpool2dImpl : public MaxUnpoolImpl<2, MaxUnpool2dImpl> { + public: + using MaxUnpoolImpl<2, MaxUnpool2dImpl>::MaxUnpoolImpl; + Tensor forward( + const Tensor& input, + const Tensor& indices, + const c10::optional>& output_size = c10::nullopt); + + protected: + FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(c10::optional>())}) +}; + +/// A `ModuleHolder` subclass for `MaxUnpool2dImpl`. +/// See the documentation for `MaxUnpool2dImpl` class to learn what methods it +/// provides, and examples of how to use `MaxUnpool2d` with +/// `torch::nn::MaxUnpool2dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(MaxUnpool2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxUnpool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies maxunpool over a 3-D input. 
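The unpool modules consume the indices produced by the matching pool's `forward_with_indices()`, scattering the pooled values back and zero-filling everything else. A minimal 2-D sketch, assuming a linked libtorch and C++17 structured bindings:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::MaxPool2d pool(torch::nn::MaxPool2dOptions(2).stride(2));
  torch::nn::MaxUnpool2d unpool(torch::nn::MaxUnpool2dOptions(2).stride(2));

  auto x = torch::randn({1, 1, 4, 4});
  auto [pooled, indices] = pool->forward_with_indices(x);  // [1, 1, 2, 2] each
  auto restored = unpool(pooled, indices);                 // [1, 1, 4, 4]
  std::cout << restored.sizes() << '\n';
}
```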
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.MaxUnpool3d to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::MaxUnpool3dOptions` class to learn +/// what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// MaxUnpool3d model(MaxUnpool3dOptions(3).stride(2).padding(1)); +/// ``` +class TORCH_API MaxUnpool3dImpl : public MaxUnpoolImpl<3, MaxUnpool3dImpl> { + public: + using MaxUnpoolImpl<3, MaxUnpool3dImpl>::MaxUnpoolImpl; + Tensor forward( + const Tensor& input, + const Tensor& indices, + const c10::optional>& output_size = c10::nullopt); + + protected: + FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(c10::optional>())}) +}; + +/// A `ModuleHolder` subclass for `MaxUnpool3dImpl`. +/// See the documentation for `MaxUnpool3dImpl` class to learn what methods it +/// provides, and examples of how to use `MaxUnpool3d` with +/// `torch::nn::MaxUnpool3dOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(MaxUnpool3d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FractionalMaxPool2d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies fractional maxpool over a 2-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.FractionalMaxPool2d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::FractionalMaxPool2dOptions` class to +/// learn what constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// FractionalMaxPool2d model(FractionalMaxPool2dOptions(5).output_size(1)); +/// ``` +class TORCH_API FractionalMaxPool2dImpl + : public torch::nn::Cloneable { + public: + FractionalMaxPool2dImpl(ExpandingArray<2> kernel_size) + : FractionalMaxPool2dImpl(FractionalMaxPool2dOptions(kernel_size)) {} + explicit FractionalMaxPool2dImpl(FractionalMaxPool2dOptions options_); + + void reset() override; + + /// Pretty prints the `FractionalMaxPool2d` module into the given `stream`. + void pretty_print(std::ostream& stream) const override; + + Tensor forward(const Tensor& input); + + /// Returns the outputs and the indices of the max values. + /// Useful for `torch::nn::MaxUnpool2d` later. + std::tuple forward_with_indices(const Tensor& input); + + /// The options with which this `Module` was constructed. + FractionalMaxPool2dOptions options; + + Tensor _random_samples; +}; + +/// A `ModuleHolder` subclass for `FractionalMaxPool2dImpl`. +/// See the documentation for `FractionalMaxPool2dImpl` class to learn what +/// methods it provides, and examples of how to use `FractionalMaxPool2d` with +/// `torch::nn::FractionalMaxPool2dOptions`. See the documentation for +/// `ModuleHolder` to learn about PyTorch's module storage semantics. +TORCH_MODULE(FractionalMaxPool2d); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FractionalMaxPool3d +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// Applies fractional maxpool over a 3-D input. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.FractionalMaxPool3d to +/// learn about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::FractionalMaxPool3dOptions` class to +/// learn what constructor arguments are supported for this module. 
+///
+/// Example:
+/// ```
+/// FractionalMaxPool3d model(FractionalMaxPool3dOptions(5).output_size(1));
+/// ```
+class TORCH_API FractionalMaxPool3dImpl
+    : public torch::nn::Cloneable<FractionalMaxPool3dImpl> {
+ public:
+  FractionalMaxPool3dImpl(ExpandingArray<3> kernel_size)
+      : FractionalMaxPool3dImpl(FractionalMaxPool3dOptions(kernel_size)) {}
+  explicit FractionalMaxPool3dImpl(FractionalMaxPool3dOptions options_);
+
+  void reset() override;
+
+  /// Pretty prints the `FractionalMaxPool3d` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  Tensor forward(const Tensor& input);
+
+  /// Returns the outputs and the indices of the max values.
+  /// Useful for `torch::nn::MaxUnpool3d` later.
+  std::tuple<Tensor, Tensor> forward_with_indices(const Tensor& input);
+
+  /// The options with which this `Module` was constructed.
+  FractionalMaxPool3dOptions options;
+
+  Tensor _random_samples;
+};
+
+/// A `ModuleHolder` subclass for `FractionalMaxPool3dImpl`.
+/// See the documentation for `FractionalMaxPool3dImpl` class to learn what
+/// methods it provides, and examples of how to use `FractionalMaxPool3d` with
+/// `torch::nn::FractionalMaxPool3dOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(FractionalMaxPool3d);
+
+// ============================================================================
+
+/// Base class for all (dimension-specialized) lppool modules.
+template <size_t D, typename Derived>
+class TORCH_API LPPoolImpl : public torch::nn::Cloneable<Derived> {
+ public:
+  LPPoolImpl(double norm_type, ExpandingArray<D> kernel_size)
+      : LPPoolImpl(LPPoolOptions<D>(norm_type, kernel_size)) {}
+  explicit LPPoolImpl(const LPPoolOptions<D>& options_);
+
+  void reset() override;
+
+  /// Pretty prints the `LPPool{1,2,3}d` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  LPPoolOptions<D> options;
+};
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LPPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies power-average (LP) pooling over a 1-D input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.LPPool1d to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::LPPool1dOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// LPPool1d model(LPPool1dOptions(1, 2).stride(5).ceil_mode(true));
+/// ```
+class TORCH_API LPPool1dImpl : public LPPoolImpl<1, LPPool1dImpl> {
+ public:
+  using LPPoolImpl<1, LPPool1dImpl>::LPPoolImpl;
+
+  Tensor forward(const Tensor& input);
+};
+
+/// A `ModuleHolder` subclass for `LPPool1dImpl`.
+/// See the documentation for `LPPool1dImpl` class to learn what methods it
+/// provides, and examples of how to use `LPPool1d` with
+/// `torch::nn::LPPool1dOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(LPPool1d);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LPPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies power-average (LP) pooling over a 2-D input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.LPPool2d to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::LPPool2dOptions` class to learn what
+/// constructor arguments are supported for this module.
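The `norm_type` option is the exponent p of the power average: each window yields `(sum x^p)^(1/p)`, so p = 1 gives the window sum and p approaching infinity approaches max pooling. A minimal sketch, assuming a linked libtorch:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // p = 1, window 2, stride 2: windows {1, 2} and {3, 4} sum to 3 and 7.
  torch::nn::LPPool1d pool(torch::nn::LPPool1dOptions(1, 2).stride(2));
  auto x = torch::tensor({{{1.0, 2.0, 3.0, 4.0}}});
  std::cout << pool(x) << '\n';  // values 3 and 7, shape [1, 1, 2]
}
```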
+///
+/// Example:
+/// ```
+/// LPPool2d model(LPPool2dOptions(1, std::vector<int64_t>({3, 4})).stride({5,
+/// 6}).ceil_mode(true));
+/// ```
+class TORCH_API LPPool2dImpl : public LPPoolImpl<2, LPPool2dImpl> {
+ public:
+  using LPPoolImpl<2, LPPool2dImpl>::LPPoolImpl;
+
+  Tensor forward(const Tensor& input);
+};
+
+/// A `ModuleHolder` subclass for `LPPool2dImpl`.
+/// See the documentation for `LPPool2dImpl` class to learn what methods it
+/// provides, and examples of how to use `LPPool2d` with
+/// `torch::nn::LPPool2dOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(LPPool2d);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LPPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Applies power-average (LP) pooling over a 3-D input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.LPPool3d to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::LPPool3dOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// LPPool3d model(LPPool3dOptions(1, std::vector<int64_t>({3, 4, 5})).stride(
+/// {5, 6, 7}).ceil_mode(true));
+/// ```
+class TORCH_API LPPool3dImpl : public LPPoolImpl<3, LPPool3dImpl> {
+ public:
+  using LPPoolImpl<3, LPPool3dImpl>::LPPoolImpl;
+
+  Tensor forward(const Tensor& input);
+};
+
+/// A `ModuleHolder` subclass for `LPPool3dImpl`.
+/// See the documentation for `LPPool3dImpl` class to learn what methods it
+/// provides, and examples of how to use `LPPool3d` with
+/// `torch::nn::LPPool3dOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(LPPool3d);
+
+} // namespace nn
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/rnn.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/rnn.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c5ac5f3f45940a39bdb56a411d481de402fd9f6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/rnn.h
@@ -0,0 +1,401 @@
+#pragma once
+
+#include <torch/nn/cloneable.h>
+#include <torch/nn/modules/common.h>
+#include <torch/nn/modules/dropout.h>
+#include <torch/nn/options/rnn.h>
+#include <torch/nn/pimpl.h>
+#include <torch/nn/utils/rnn.h>
+#include <torch/types.h>
+
+#include <ATen/ATen.h>
+#include <c10/util/Exception.h>
+
+#include <cstddef>
+#include <functional>
+#include <memory>
+#include <vector>
+
+namespace torch {
+namespace nn {
+
+namespace detail {
+/// Base class for all RNN implementations (intended for code sharing).
+template <typename Derived>
+class TORCH_API RNNImplBase : public torch::nn::Cloneable<Derived> {
+ public:
+  explicit RNNImplBase(const RNNOptionsBase& options_);
+
+  /// Initializes the parameters of the RNN module.
+  void reset() override;
+
+  void reset_parameters();
+
+  /// Overrides `nn::Module::to()` to call `flatten_parameters()` after the
+  /// original operation.
+  void to(torch::Device device, torch::Dtype dtype, bool non_blocking = false)
+      override;
+  void to(torch::Dtype dtype, bool non_blocking = false) override;
+  void to(torch::Device device, bool non_blocking = false) override;
+
+  /// Pretty prints the RNN module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  /// Modifies the internal storage of weights for optimization purposes.
+  ///
+  /// On CPU, this method should be called if any of the weight or bias vectors
+  /// are changed (i.e. weights are added or removed). On GPU, it should be
+  /// called __any time the storage of any parameter is modified__, e.g. any
+  /// time a parameter is assigned a new value.
+  /// This allows using the fast path in cuDNN implementations of the
+  /// respective RNN `forward()` methods. It is called once upon construction,
+  /// inside `reset()`.
+  void flatten_parameters();
+
+  std::vector<Tensor> all_weights() const;
+
+  /// The RNN's options.
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  RNNOptionsBase options_base;
+
+ protected:
+  // Resets flat_weights_.
+  // Note: be very careful before removing this, as third-party device types
+  // likely rely on this behavior to properly .to() modules like LSTM.
+  void reset_flat_weights();
+
+  void check_input(const Tensor& input, const Tensor& batch_sizes) const;
+
+  std::tuple<int64_t, int64_t, int64_t> get_expected_hidden_size(
+      const Tensor& input,
+      const Tensor& batch_sizes) const;
+
+  void check_hidden_size(
+      const Tensor& hx,
+      std::tuple<int64_t, int64_t, int64_t> expected_hidden_size,
+      std::string msg = "Expected hidden size {1}, got {2}") const;
+
+  void check_forward_args(Tensor input, Tensor hidden, Tensor batch_sizes)
+      const;
+
+  Tensor permute_hidden(Tensor hx, const Tensor& permutation) const;
+
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  std::vector<std::string> flat_weights_names_;
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  std::vector<std::vector<std::string>> all_weights_;
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  std::vector<Tensor> flat_weights_;
+};
+} // namespace detail
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RNN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// A multi-layer Elman RNN module with Tanh or ReLU activation.
+/// See https://pytorch.org/docs/master/generated/torch.nn.RNN.html to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::RNNOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// RNN model(RNNOptions(128,
+/// 64).num_layers(3).dropout(0.2).nonlinearity(torch::kTanh));
+/// ```
+class TORCH_API RNNImpl : public detail::RNNImplBase<RNNImpl> {
+ public:
+  RNNImpl(int64_t input_size, int64_t hidden_size)
+      : RNNImpl(RNNOptions(input_size, hidden_size)) {}
+  explicit RNNImpl(const RNNOptions& options_);
+
+  std::tuple<Tensor, Tensor> forward(const Tensor& input, Tensor hx = {});
+
+ protected:
+  FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())})
+
+ public:
+  std::tuple<torch::nn::utils::rnn::PackedSequence, Tensor>
+  forward_with_packed_input(
+      const torch::nn::utils::rnn::PackedSequence& packed_input,
+      Tensor hx = {});
+
+  RNNOptions options;
+
+ protected:
+  std::tuple<Tensor, Tensor> forward_helper(
+      const Tensor& input,
+      const Tensor& batch_sizes,
+      const Tensor& sorted_indices,
+      int64_t max_batch_size,
+      Tensor hx);
+};
+
+/// A `ModuleHolder` subclass for `RNNImpl`.
+/// See the documentation for `RNNImpl` class to learn what methods it
+/// provides, and examples of how to use `RNN` with `torch::nn::RNNOptions`.
+/// See the documentation for `ModuleHolder` to learn about PyTorch's
+/// module storage semantics.
+TORCH_MODULE(RNN);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LSTM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// A multi-layer long short-term memory (LSTM) module.
+/// See https://pytorch.org/docs/master/generated/torch.nn.LSTM.html to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::LSTMOptions` class to learn what
+/// constructor arguments are supported for this module.
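The recurrent modules return both the full output sequence and the final hidden state; with the default (non-`batch_first`) layout the input is `[seq_len, batch, input_size]`. A minimal `RNN` sketch, assuming a linked libtorch:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::RNN rnn(torch::nn::RNNOptions(128, 64).num_layers(3));
  auto input = torch::randn({10, 4, 128});   // [seq_len, batch, input_size]
  auto [output, h_n] = rnn->forward(input);  // hx defaults to zeros
  std::cout << output.sizes() << ' ' << h_n.sizes() << '\n';
  // [10, 4, 64] [3, 4, 64]
}
```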
+///
+/// Example:
+/// ```
+/// LSTM model(LSTMOptions(2,
+/// 4).num_layers(3).batch_first(false).bidirectional(true));
+/// ```
+class TORCH_API LSTMImpl : public detail::RNNImplBase<LSTMImpl> {
+ public:
+  LSTMImpl(int64_t input_size, int64_t hidden_size)
+      : LSTMImpl(LSTMOptions(input_size, hidden_size)) {}
+  explicit LSTMImpl(const LSTMOptions& options_);
+
+  std::tuple<Tensor, std::tuple<Tensor, Tensor>> forward(
+      const Tensor& input,
+      torch::optional<std::tuple<Tensor, Tensor>> hx_opt = {});
+
+ protected:
+  FORWARD_HAS_DEFAULT_ARGS(
+      {1, AnyValue(torch::optional<std::tuple<Tensor, Tensor>>())})
+
+ public:
+  std::tuple<torch::nn::utils::rnn::PackedSequence, std::tuple<Tensor, Tensor>>
+  forward_with_packed_input(
+      const torch::nn::utils::rnn::PackedSequence& packed_input,
+      torch::optional<std::tuple<Tensor, Tensor>> hx_opt = {});
+
+  LSTMOptions options;
+
+ protected:
+  void check_forward_args(
+      const Tensor& input,
+      std::tuple<Tensor, Tensor> hidden,
+      const Tensor& batch_sizes) const;
+
+  std::tuple<int64_t, int64_t, int64_t> get_expected_cell_size(
+      const Tensor& input,
+      const Tensor& batch_sizes) const;
+
+  std::tuple<Tensor, Tensor> permute_hidden(
+      std::tuple<Tensor, Tensor> hx,
+      const Tensor& permutation) const;
+
+  std::tuple<Tensor, std::tuple<Tensor, Tensor>> forward_helper(
+      const Tensor& input,
+      const Tensor& batch_sizes,
+      const Tensor& sorted_indices,
+      int64_t max_batch_size,
+      torch::optional<std::tuple<Tensor, Tensor>> hx_opt);
+};
+
+/// A `ModuleHolder` subclass for `LSTMImpl`.
+/// See the documentation for `LSTMImpl` class to learn what methods it
+/// provides, and examples of how to use `LSTM` with `torch::nn::LSTMOptions`.
+/// See the documentation for `ModuleHolder` to learn about PyTorch's
+/// module storage semantics.
+TORCH_MODULE(LSTM);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GRU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// A multi-layer gated recurrent unit (GRU) module.
+/// See https://pytorch.org/docs/master/generated/torch.nn.GRU.html to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::GRUOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// GRU model(GRUOptions(2,
+/// 4).num_layers(3).batch_first(false).bidirectional(true));
+/// ```
+class TORCH_API GRUImpl : public detail::RNNImplBase<GRUImpl> {
+ public:
+  GRUImpl(int64_t input_size, int64_t hidden_size)
+      : GRUImpl(GRUOptions(input_size, hidden_size)) {}
+  explicit GRUImpl(const GRUOptions& options_);
+
+  std::tuple<Tensor, Tensor> forward(const Tensor& input, Tensor hx = {});
+
+ protected:
+  FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(torch::Tensor())})
+
+ public:
+  std::tuple<torch::nn::utils::rnn::PackedSequence, Tensor>
+  forward_with_packed_input(
+      const torch::nn::utils::rnn::PackedSequence& packed_input,
+      Tensor hx = {});
+
+  GRUOptions options;
+
+ protected:
+  std::tuple<Tensor, Tensor> forward_helper(
+      const Tensor& input,
+      const Tensor& batch_sizes,
+      const Tensor& sorted_indices,
+      int64_t max_batch_size,
+      Tensor hx);
+};
+
+/// A `ModuleHolder` subclass for `GRUImpl`.
+/// See the documentation for `GRUImpl` class to learn what methods it
+/// provides, and examples of how to use `GRU` with `torch::nn::GRUOptions`.
+/// See the documentation for `ModuleHolder` to learn about PyTorch's
+/// module storage semantics.
+TORCH_MODULE(GRU);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RNNCellImplBase
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+namespace detail {
+/// Base class for all RNNCell implementations (intended for code sharing).
+template <typename Derived>
+class TORCH_API RNNCellImplBase : public torch::nn::Cloneable<Derived> {
+ public:
+  explicit RNNCellImplBase(const RNNCellOptionsBase& options_);
+
+  /// Initializes the parameters of the RNNCell module.
+  void reset() override;
+
+  void reset_parameters();
+
+  /// Pretty prints the RNNCell module into the given `stream`.
+ void pretty_print(std::ostream& stream) const override; + + RNNCellOptionsBase options_base; + + Tensor weight_ih; + Tensor weight_hh; + Tensor bias_ih; + Tensor bias_hh; + + protected: + void check_forward_input(const Tensor& input, const std::string name) const; + virtual std::string get_nonlinearity_str() const; +}; +} // namespace detail + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RNNCell +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// An Elman RNN cell with tanh or ReLU non-linearity. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.RNNCell to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::RNNCellOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// RNNCell model(RNNCellOptions(20, +/// 10).bias(false).nonlinearity(torch::kReLU)); +/// ``` +class TORCH_API RNNCellImpl : public detail::RNNCellImplBase { + public: + RNNCellImpl(int64_t input_size, int64_t hidden_size) + : RNNCellImpl(RNNCellOptions(input_size, hidden_size)) {} + explicit RNNCellImpl(const RNNCellOptions& options_); + + Tensor forward(const Tensor& input, Tensor hx = {}); + + protected: + FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())}) + + public: + RNNCellOptions options; + + protected: + std::string get_nonlinearity_str() const override; +}; + +/// A `ModuleHolder` subclass for `RNNCellImpl`. +/// See the documentation for `RNNCellImpl` class to learn what methods it +/// provides, and examples of how to use `RNNCell` with +/// `torch::nn::RNNCellOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(RNNCell); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LSTMCell +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// A long short-term memory (LSTM) cell. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.LSTMCell to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::LSTMCellOptions` class to learn what +/// constructor arguments are supported for this module. +/// +/// Example: +/// ``` +/// LSTMCell model(LSTMCellOptions(20, 10).bias(false)); +/// ``` +class TORCH_API LSTMCellImpl : public detail::RNNCellImplBase { + public: + LSTMCellImpl(int64_t input_size, int64_t hidden_size) + : LSTMCellImpl(LSTMCellOptions(input_size, hidden_size)) {} + explicit LSTMCellImpl(const LSTMCellOptions& options_); + + std::tuple forward( + const Tensor& input, + torch::optional> hx_opt = {}); + + protected: + FORWARD_HAS_DEFAULT_ARGS( + {1, AnyValue(torch::optional>())}) + + public: + LSTMCellOptions options; +}; + +/// A `ModuleHolder` subclass for `LSTMCellImpl`. +/// See the documentation for `LSTMCellImpl` class to learn what methods it +/// provides, and examples of how to use `LSTMCell` with +/// `torch::nn::LSTMCellOptions`. See the documentation for `ModuleHolder` to +/// learn about PyTorch's module storage semantics. +TORCH_MODULE(LSTMCell); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GRUCell +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/// A gated recurrent unit (GRU) cell. +/// See https://pytorch.org/docs/master/nn.html#torch.nn.GRUCell to learn +/// about the exact behavior of this module. +/// +/// See the documentation for `torch::nn::GRUCellOptions` class to learn what +/// constructor arguments are supported for this module. 
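Unlike the sequence-level modules above, the cell variants advance one time step per call, leaving the loop (and any per-step logic) to the caller. A minimal `LSTMCell` sketch, assuming a linked libtorch:

```
#include <torch/torch.h>
#include <iostream>
#include <tuple>

int main() {
  torch::nn::LSTMCell cell(torch::nn::LSTMCellOptions(20, 10));
  auto h = torch::zeros({4, 10});          // [batch, hidden_size]
  auto c = torch::zeros({4, 10});
  auto inputs = torch::randn({6, 4, 20});  // [seq_len, batch, input_size]
  for (int64_t t = 0; t < inputs.size(0); ++t) {
    std::tie(h, c) = cell->forward(inputs[t], std::make_tuple(h, c));
  }
  std::cout << h.sizes() << '\n';  // [4, 10]
}
```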
+///
+/// Example:
+/// ```
+/// GRUCell model(GRUCellOptions(20, 10).bias(false));
+/// ```
+class TORCH_API GRUCellImpl : public detail::RNNCellImplBase<GRUCellImpl> {
+ public:
+  GRUCellImpl(int64_t input_size, int64_t hidden_size)
+      : GRUCellImpl(GRUCellOptions(input_size, hidden_size)) {}
+  explicit GRUCellImpl(const GRUCellOptions& options_);
+
+  Tensor forward(const Tensor& input, Tensor hx = {});
+
+ protected:
+  FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())})
+
+ public:
+  GRUCellOptions options;
+};
+
+/// A `ModuleHolder` subclass for `GRUCellImpl`.
+/// See the documentation for `GRUCellImpl` class to learn what methods it
+/// provides, and examples of how to use `GRUCell` with
+/// `torch::nn::GRUCellOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(GRUCell);
+
+} // namespace nn
+} // namespace torch
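+
+/// A minimal sketch of stepping the cell modules above over a sequence,
+/// assuming a standard libtorch build (cells consume one time step at a
+/// time, unlike the full LSTM/GRU modules):
+/// ```
+/// #include <torch/torch.h>
+///
+/// int main() {
+///   torch::nn::GRUCell cell(torch::nn::GRUCellOptions(20, 10));
+///   torch::nn::LSTMCell lstm_cell(torch::nn::LSTMCellOptions(20, 10));
+///
+///   auto input = torch::randn({6, 3, 20}); // (seq_len, batch, input_size)
+///   auto hx = torch::zeros({3, 10});
+///   std::tuple<torch::Tensor, torch::Tensor> state{
+///       torch::zeros({3, 10}), torch::zeros({3, 10})};
+///
+///   for (int64_t t = 0; t < input.size(0); ++t) {
+///     hx = cell->forward(input[t], hx);            // GRUCell: h_t
+///     state = lstm_cell->forward(input[t], state); // LSTMCell: (h_t, c_t)
+///   }
+/// }
+/// ```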
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/transformer.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/transformer.h
new file mode 100644
index 0000000000000000000000000000000000000000..c8c417c7564b3705cfe58f887bf43a03b7573f53
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/transformer.h
@@ -0,0 +1,143 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+namespace torch {
+namespace nn {
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Transformer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// A transformer model. The user is able to modify the attributes as needed.
+/// The architecture is based on the paper "Attention Is All You Need". Ashish
+/// Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N
+/// Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need.
+/// In Advances in Neural Information Processing Systems, pages 6000-6010.
+///
+/// See https://pytorch.org/docs/stable/generated/torch.nn.Transformer.html to
+/// learn about the exact behavior of this transformer model.
+///
+/// See the documentation for the `torch::nn::TransformerOptions` class to
+/// learn what constructor arguments are supported for this transformer model.
+///
+/// Example:
+/// ```
+/// Transformer trans(TransformerOptions(512, 8));
+/// ```
+class TORCH_API TransformerImpl : public Cloneable<TransformerImpl> {
+ public:
+  explicit TransformerImpl(TransformerOptions options_);
+
+  /// forward function for the Transformer module
+  /// Args:
+  ///   src: the sequence to the encoder (required).
+  ///   tgt: the sequence to the decoder (required).
+  ///   src_mask: the additive mask for the src sequence (optional).
+  ///   tgt_mask: the additive mask for the tgt sequence (optional).
+  ///   memory_mask: the additive mask for the encoder output (optional).
+  ///   src_key_padding_mask: the ByteTensor mask for src keys per batch
+  ///     (optional).
+  ///   tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch
+  ///     (optional).
+  ///   memory_key_padding_mask: the ByteTensor mask for memory keys per batch
+  ///     (optional).
+  ///
+  /// Shape:
+  ///   src: `(S, N, E)`
+  ///   tgt: `(T, N, E)`
+  ///   src_mask: `(S, S)`
+  ///   tgt_mask: `(T, T)`
+  ///   memory_mask: `(T, S)`
+  ///   src_key_padding_mask: `(N, S)`
+  ///   tgt_key_padding_mask: `(N, T)`
+  ///   memory_key_padding_mask: `(N, S)`
+  ///
+  /// Note:
+  ///   [src/tgt/memory]_mask ensures that position i is allowed to attend to
+  ///   the unmasked positions. If a ByteTensor is provided, the non-zero
+  ///   positions are not allowed to attend while the zero positions will be
+  ///   unchanged. If a BoolTensor is provided, positions with `True` are not
+  ///   allowed to attend while `False` values will be unchanged. If a
+  ///   FloatTensor is provided, it will be added to the attention weight.
+  ///
+  ///   [src/tgt/memory]_key_padding_mask marks specified elements in the
+  ///   key to be ignored by the attention. If a ByteTensor is provided, the
+  ///   non-zero positions will be ignored while the zero positions will be
+  ///   unchanged. If a BoolTensor is provided, the positions with the value
+  ///   of `True` will be ignored while the positions with the value of
+  ///   `False` will be unchanged.
+  ///
+  ///   output: `(T, N, E)`
+  ///
+  /// Note:
+  ///   Due to the multi-head attention architecture in the transformer model,
+  ///   the output sequence length of a transformer is the same as the input
+  ///   sequence (i.e. target) length of the decoder.
+  ///
+  /// where
+  ///   S is the source sequence length,
+  ///   T is the target sequence length,
+  ///   N is the batch size,
+  ///   E is the feature number.
+  Tensor forward(
+      const Tensor& src,
+      const Tensor& tgt,
+      const Tensor& src_mask = {},
+      const Tensor& tgt_mask = {},
+      const Tensor& memory_mask = {},
+      const Tensor& src_key_padding_mask = {},
+      const Tensor& tgt_key_padding_mask = {},
+      const Tensor& memory_key_padding_mask = {});
+
+  void reset() override;
+
+  void reset_parameters();
+
+  /// Generate a square mask for the sequence.
+  /// The masked positions are filled with `-inf` in float type.
+  /// Unmasked positions are filled with `0.0` in float type.
+  /// Note:
+  ///   1. This function will always return a CPU tensor.
+  ///   2. This function requires that the platform supports IEEE754, since
+  ///      `-inf` is guaranteed to be valid only when IEEE754 is supported. If
+  ///      the platform doesn't support IEEE754, this function will fill the
+  ///      mask with the smallest float number instead of `-inf`, and a
+  ///      one-time warning will be emitted.
+  static Tensor generate_square_subsequent_mask(int64_t sz);
+
+ protected:
+  FORWARD_HAS_DEFAULT_ARGS(
+      {2, AnyValue(Tensor())},
+      {3, AnyValue(Tensor())},
+      {4, AnyValue(Tensor())},
+      {5, AnyValue(Tensor())},
+      {6, AnyValue(Tensor())},
+      {7, AnyValue(Tensor())})
+
+ public:
+  /// options with which this `Transformer` was constructed
+  TransformerOptions options;
+
+  /// encoder module
+  AnyModule encoder;
+
+  /// decoder module
+  AnyModule decoder;
+};
+
+/// A `ModuleHolder` subclass for `TransformerImpl`.
+/// See the documentation for `TransformerImpl` class to learn what
+/// methods it provides, and examples of how to use `Transformer` with
+/// `torch::nn::TransformerOptions`.
+/// See the documentation for `ModuleHolder` to learn about PyTorch's
+/// module storage semantics.
+TORCH_MODULE(Transformer);
+
+} // namespace nn
+} // namespace torch
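+
+/// A minimal end-to-end sketch for the `Transformer` module above, assuming
+/// a standard libtorch build (`generate_square_subsequent_mask` is the
+/// static member declared above; an undefined `Tensor` stands in for an
+/// omitted mask):
+/// ```
+/// #include <torch/torch.h>
+///
+/// int main() {
+///   torch::nn::Transformer transformer(
+///       torch::nn::TransformerOptions(512, 8)); // d_model, nhead
+///
+///   auto src = torch::rand({10, 32, 512}); // (S, N, E)
+///   auto tgt = torch::rand({20, 32, 512}); // (T, N, E)
+///
+///   // Causal mask: position i in tgt attends only to positions <= i.
+///   auto tgt_mask =
+///       torch::nn::TransformerImpl::generate_square_subsequent_mask(
+///           tgt.size(0));
+///
+///   auto out = transformer(src, tgt, /*src_mask=*/torch::Tensor(), tgt_mask);
+///   // out: (T, N, E) == (20, 32, 512)
+/// }
+/// ```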
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/transformercoder.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/transformercoder.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd1998449abd128c9431487d0af3543ce01a228d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/transformercoder.h
@@ -0,0 +1,154 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+namespace torch {
+namespace nn {
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TransformerEncoder ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// TransformerEncoder module.
+/// See
+/// https://pytorch.org/docs/master/generated/torch.nn.TransformerEncoder.html
+/// to learn about the exact behavior of this encoder module.
+///
+/// See the documentation for `torch::nn::TransformerEncoderOptions` class to
+/// learn what constructor arguments are supported for this encoder module.
+///
+/// Example:
+/// ```
+/// TransformerEncoderLayer encoderLayer(
+///     TransformerEncoderLayerOptions(512, 8).dropout(0.1));
+/// TransformerEncoder encoder(
+///     TransformerEncoderOptions(encoderLayer, 6)
+///         .norm(LayerNorm(LayerNormOptions({2}))));
+/// ```
+class TORCH_API TransformerEncoderImpl
+    : public Cloneable<TransformerEncoderImpl> {
+ public:
+  TransformerEncoderImpl(
+      TransformerEncoderLayer encoder_layer,
+      int64_t num_layers)
+      : TransformerEncoderImpl(
+            TransformerEncoderOptions(encoder_layer, num_layers)) {}
+  explicit TransformerEncoderImpl(TransformerEncoderOptions options_);
+
+  Tensor forward(
+      const Tensor& src,
+      const Tensor& src_mask = {},
+      const Tensor& src_key_padding_mask = {});
+
+  void reset() override;
+
+  void reset_parameters();
+
+ protected:
+  FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())}, {2, AnyValue(Tensor())})
+
+ public:
+  /// options with which this `TransformerEncoder` was constructed
+  TransformerEncoderOptions options;
+
+  /// module list that contains all the encoder layers
+  ModuleList layers = nullptr;
+
+  /// optional normalization module
+  AnyModule norm;
+};
+
+/// A `ModuleHolder` subclass for `TransformerEncoderImpl`.
+/// See the documentation for `TransformerEncoderImpl` class to learn what
+/// methods it provides, and examples of how to use `TransformerEncoder` with
+/// `torch::nn::TransformerEncoderOptions`.
+/// See the documentation for `ModuleHolder` to learn about PyTorch's
+/// module storage semantics.
+TORCH_MODULE(TransformerEncoder);
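+
+/// A minimal usage sketch for the encoder stack above, assuming a standard
+/// libtorch build:
+/// ```
+/// #include <torch/torch.h>
+///
+/// int main() {
+///   torch::nn::TransformerEncoderLayer layer(
+///       torch::nn::TransformerEncoderLayerOptions(512, 8).dropout(0.1));
+///   torch::nn::TransformerEncoder encoder(
+///       torch::nn::TransformerEncoderOptions(layer, 6));
+///
+///   auto src = torch::rand({10, 32, 512}); // (S, N, E)
+///   auto out = encoder(src);               // same shape as src
+/// }
+/// ```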
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TransformerDecoder ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// TransformerDecoder is a stack of N decoder layers.
+/// See
+/// https://pytorch.org/docs/master/generated/torch.nn.TransformerDecoder.html
+/// to learn about the exact behavior of this decoder module.
+///
+/// See the documentation for `torch::nn::TransformerDecoderOptions` class to
+/// learn what constructor arguments are supported for this decoder module.
+///
+/// Example:
+/// ```
+/// TransformerDecoderLayer decoder_layer(
+///     TransformerDecoderLayerOptions(512, 8).dropout(0.1));
+/// TransformerDecoder transformer_decoder(
+///     TransformerDecoderOptions(decoder_layer, 6)
+///         .norm(LayerNorm(LayerNormOptions({2}))));
+/// const auto memory = torch::rand({10, 32, 512});
+/// const auto tgt = torch::rand({20, 32, 512});
+/// auto out = transformer_decoder(tgt, memory);
+/// ```
+class TORCH_API TransformerDecoderImpl
+    : public Cloneable<TransformerDecoderImpl> {
+ public:
+  TransformerDecoderImpl(
+      TransformerDecoderLayer decoder_layer,
+      int64_t num_layers)
+      : TransformerDecoderImpl(
+            TransformerDecoderOptions(decoder_layer, num_layers)) {}
+  explicit TransformerDecoderImpl(TransformerDecoderOptions options_);
+
+  void reset() override;
+
+  void reset_parameters();
+
+  /// Pass the inputs (and masks) through the decoder layers in turn.
+  /// Args:
+  ///   tgt: the sequence to the decoder layer (required).
+  ///   memory: the sequence from the last layer of the encoder (required).
+  ///   tgt_mask: the mask for the tgt sequence (optional).
+  ///   memory_mask: the mask for the memory sequence (optional).
+  ///   tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
+  ///   memory_key_padding_mask: the mask for the memory keys per batch
+  ///     (optional).
+  Tensor forward(
+      const Tensor& tgt,
+      const Tensor& memory,
+      const Tensor& tgt_mask = {},
+      const Tensor& memory_mask = {},
+      const Tensor& tgt_key_padding_mask = {},
+      const Tensor& memory_key_padding_mask = {});
+
+  /// The options used to configure this module.
+  TransformerDecoderOptions options;
+
+  /// Cloned layers of the decoder.
+  ModuleList layers{nullptr};
+
+  /// optional layer normalization module
+  AnyModule norm;
+
+ protected:
+  FORWARD_HAS_DEFAULT_ARGS(
+      {2, AnyValue(Tensor())},
+      {3, AnyValue(Tensor())},
+      {4, AnyValue(Tensor())},
+      {5, AnyValue(Tensor())})
+};
+
+/// A `ModuleHolder` subclass for `TransformerDecoderImpl`.
+/// See the documentation for `TransformerDecoderImpl` class to learn what
+/// methods it provides, and examples of how to use `TransformerDecoder` with
+/// `torch::nn::TransformerDecoderOptions`.
+/// See the documentation for `ModuleHolder` to learn about PyTorch's
+/// module storage semantics.
+TORCH_MODULE(TransformerDecoder);
+
+} // namespace nn
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/transformerlayer.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/transformerlayer.h
new file mode 100644
index 0000000000000000000000000000000000000000..0378226b156372f8e7b33058053602b002c7d64f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/transformerlayer.h
@@ -0,0 +1,195 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+namespace torch {
+namespace nn {
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~ TransformerEncoderLayer ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// TransformerEncoderLayer module.
+/// See
+/// https://pytorch.org/docs/master/generated/torch.nn.TransformerEncoderLayer.html
+/// to learn about the exact behavior of this encoder layer module.
+///
+/// See the documentation for `torch::nn::TransformerEncoderLayerOptions` class
+/// to learn what constructor arguments are supported for this encoder layer
+/// module.
+///
+/// Example:
+/// ```
+/// TransformerEncoderLayer encoderLayer(
+///     TransformerEncoderLayerOptions(512, 8).dropout(0.1));
+/// ```
+class TORCH_API TransformerEncoderLayerImpl
+    : public Cloneable<TransformerEncoderLayerImpl> {
+ public:
+  TransformerEncoderLayerImpl(int64_t d_model, int64_t nhead)
+      : TransformerEncoderLayerImpl(
+            TransformerEncoderLayerOptions(d_model, nhead)) {}
+  explicit TransformerEncoderLayerImpl(TransformerEncoderLayerOptions options_);
+
+  Tensor forward(
+      const Tensor& src,
+      const Tensor& src_mask = {},
+      const Tensor& src_key_padding_mask = {});
+
+  void reset() override;
+
+  void reset_parameters();
+
+ protected:
+  FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())}, {2, AnyValue(Tensor())})
+
+ public:
+  /// options with which this `TransformerEncoderLayer` was constructed
+  TransformerEncoderLayerOptions options;
+
+  /// self attention
+  MultiheadAttention self_attn = nullptr;
+
+  /// feedforward first linear layer
+  Linear linear1 = nullptr;
+
+  /// feedforward dropout layer
+  Dropout dropout = nullptr;
+
+  /// feedforward second linear layer
+  Linear linear2 = nullptr;
+
+  /// pre feed forward, normalization layer
+  LayerNorm norm1 = nullptr;
+  /// post feed forward, normalization layer
+  LayerNorm norm2 = nullptr;
+
+  /// pre feed forward, dropout layer
+  Dropout dropout1 = nullptr;
+  /// post feed forward, dropout layer
+  Dropout dropout2 = nullptr;
+};
+
+/// A `ModuleHolder` subclass for `TransformerEncoderLayerImpl`.
+/// See the documentation for `TransformerEncoderLayerImpl` class to learn what
+/// methods it provides, and examples of how to use `TransformerEncoderLayer`
+/// with `torch::nn::TransformerEncoderLayerOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(TransformerEncoderLayer);
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~ TransformerDecoderLayer ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// TransformerDecoderLayer is made up of self-attn, multi-head-attn and a
+/// feedforward network. This standard decoder layer is based on the paper
+/// "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, Niki Parmar,
+/// Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia
+/// Polosukhin. 2017. Attention is all you need. In Advances in Neural
+/// Information Processing Systems, pages 6000-6010. Users may modify or
+/// implement it in a different way as needed. See
+/// https://pytorch.org/docs/master/nn.html#transformer-layers to learn about
+/// the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::TransformerDecoderLayerOptions` class
+/// to learn what constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// TransformerDecoderLayer model(
+///     TransformerDecoderLayerOptions(512, 8).dropout(0.2));
+/// ```
+class TORCH_API TransformerDecoderLayerImpl
+    : public Cloneable<TransformerDecoderLayerImpl> {
+ public:
+  TransformerDecoderLayerImpl(int64_t d_model, int64_t nhead)
+      : TransformerDecoderLayerImpl(
+            TransformerDecoderLayerOptions(d_model, nhead)) {}
+  explicit TransformerDecoderLayerImpl(TransformerDecoderLayerOptions options_);
+
+  void reset() override;
+
+  void reset_parameters();
+
+  /// Pass the inputs (and masks) through the decoder layer.
+  /// Args:
+  ///   tgt: the sequence to the decoder layer (required).
+  ///   memory: the sequence from the last layer of the encoder (required).
+  ///   tgt_mask: the mask for the tgt sequence (optional).
+  ///   memory_mask: the mask for the memory sequence (optional).
+  ///   tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
+  ///   memory_key_padding_mask: the mask for the memory keys per batch
+  ///     (optional).
+  Tensor forward(
+      Tensor tgt,
+      const Tensor& memory,
+      const Tensor& tgt_mask = {},
+      const Tensor& memory_mask = {},
+      const Tensor& tgt_key_padding_mask = {},
+      const Tensor& memory_key_padding_mask = {});
+
+  /// The options used to configure this module.
+  TransformerDecoderLayerOptions options;
+
+  /// self attention
+  MultiheadAttention self_attn{nullptr};
+
+  /// Dropout, post self attention
+  Dropout dropout1{nullptr};
+
+  /// Normalization, post self attention
+  LayerNorm norm1{nullptr};
+
+  /// Multi-headed attention
+  MultiheadAttention multihead_attn{nullptr};
+
+  /// Dropout, post multi-headed attention
+  Dropout dropout2{nullptr};
+
+  /// Normalization, post multi-headed attention
+  LayerNorm norm2{nullptr};
+
+  /// Feed forward first linear layer
+  Linear linear1{nullptr};
+
+  /// Feed forward dropout layer
+  Dropout dropout{nullptr};
+
+  /// Feed forward second linear layer
+  Linear linear2{nullptr};
+
+  /// Dropout, post feed forward
+  Dropout dropout3{nullptr};
+
+  /// Normalization, post feed forward
+  LayerNorm norm3{nullptr};
+
+ protected:
+  FORWARD_HAS_DEFAULT_ARGS(
+      {2, AnyValue(Tensor())},
+      {3, AnyValue(Tensor())},
+      {4, AnyValue(Tensor())},
+      {5, AnyValue(Tensor())})
+
+  /// Apply the activation based on configuration
+  Tensor activation(const Tensor& input);
+};
+
+/// A `ModuleHolder` subclass for `TransformerDecoderLayerImpl`.
+/// See the documentation for `TransformerDecoderLayerImpl` class to learn what
+/// methods it provides, and examples of how to use `TransformerDecoderLayer`
+/// with `torch::nn::TransformerDecoderLayerOptions`. See the documentation for
+/// `ModuleHolder` to learn about PyTorch's module storage semantics.
+TORCH_MODULE(TransformerDecoderLayer);
+
+} // namespace nn
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/upsampling.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/upsampling.h
new file mode 100644
index 0000000000000000000000000000000000000000..6db8b04d574aa7116bad081e5167d106bc89b8ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/upsampling.h
@@ -0,0 +1,55 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+
+namespace torch {
+namespace nn {
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Upsample ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D
+/// (volumetric) input.
+/// See https://pytorch.org/docs/master/nn.html#torch.nn.Upsample to learn
+/// about the exact behavior of this module.
+///
+/// See the documentation for `torch::nn::UpsampleOptions` class to learn what
+/// constructor arguments are supported for this module.
+///
+/// Example:
+/// ```
+/// Upsample model(UpsampleOptions()
+///                    .scale_factor(std::vector<double>({3}))
+///                    .mode(torch::kLinear)
+///                    .align_corners(false));
+/// ```
+class TORCH_API UpsampleImpl : public Cloneable<UpsampleImpl> {
+ public:
+  explicit UpsampleImpl(const UpsampleOptions& options_ = {});
+
+  void reset() override;
+
+  /// Pretty prints the `Upsample` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override;
+
+  Tensor forward(const Tensor& input);
+
+  /// The options with which this `Module` was constructed.
+  UpsampleOptions options;
+};
+
+/// A `ModuleHolder` subclass for `UpsampleImpl`.
+/// See the documentation for `UpsampleImpl` class to learn what methods it
+/// provides, and examples of how to use `Upsample` with
+/// `torch::nn::UpsampleOptions`. See the documentation for `ModuleHolder` to
+/// learn about PyTorch's module storage semantics.
+TORCH_MODULE(Upsample);
+
+} // namespace nn
+} // namespace torch
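+
+/// A minimal forward-pass sketch for `Upsample`, assuming a standard
+/// libtorch build (torch::kLinear expects 3-D (N, C, W) input):
+/// ```
+/// #include <torch/torch.h>
+///
+/// int main() {
+///   torch::nn::Upsample upsample(torch::nn::UpsampleOptions()
+///                                    .scale_factor(std::vector<double>({2}))
+///                                    .mode(torch::kLinear)
+///                                    .align_corners(false));
+///
+///   auto input = torch::arange(1, 5, torch::kFloat32).view({1, 1, 4});
+///   auto output = upsample(input); // (1, 1, 8)
+/// }
+/// ```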
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d3d383465f3318e8bf785eeef88a94a4aff2bdd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/utils.h
@@ -0,0 +1,55 @@
+#pragma once
+
+#include
+#include
+#include
+
+#include
+
+namespace torch {
+namespace nn {
+namespace modules {
+namespace utils {
+
+// Reverse the order of `t` and repeat each element `n` times.
+// This can be used to translate the padding arg used by Conv and Pooling
+// modules to the ones used by `F::pad`.
+//
+// This mirrors `_reverse_repeat_tuple` in `torch/nn/modules/utils.py`.
+inline std::vector<int64_t> _reverse_repeat_vector(
+    at::ArrayRef<int64_t> t,
+    int64_t n) {
+  TORCH_INTERNAL_ASSERT(n >= 0);
+  std::vector<int64_t> ret;
+  ret.reserve(t.size() * n);
+  for (auto rit = t.rbegin(); rit != t.rend(); ++rit) {
+    for (const auto i : c10::irange(n)) {
+      (void)i; // Suppress unused variable warning
+      ret.emplace_back(*rit);
+    }
+  }
+  return ret;
+}
+
+inline std::vector<int64_t> _list_with_default(
+    torch::ArrayRef<c10::optional<int64_t>> out_size,
+    torch::IntArrayRef defaults) {
+  TORCH_CHECK(
+      defaults.size() > out_size.size(),
+      "Input dimension should be at least ",
+      out_size.size() + 1);
+  std::vector<int64_t> ret;
+  torch::IntArrayRef defaults_slice =
+      defaults.slice(defaults.size() - out_size.size(), out_size.size());
+  for (const auto i : c10::irange(out_size.size())) {
+    auto v = out_size.at(i);
+    auto d = defaults_slice.at(i);
+    ret.emplace_back(v.has_value() ? v.value() : d);
+  }
+  return ret;
+}
+
+} // namespace utils
+} // namespace modules
+} // namespace nn
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a5224a478e1b650e393dcb3f95adc13ab36d65f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/parallel/data_parallel.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/parallel/data_parallel.h
new file mode 100644
index 0000000000000000000000000000000000000000..46bf2ac6953e7b5ba5bdbd93fe63d7c91db5c6e6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/parallel/data_parallel.h
@@ -0,0 +1,297 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+namespace torch {
+namespace nn {
+
+namespace {
+
+// Note [Replicating Modules]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Module replication is implemented in the following two steps:
+// 1) create a module replica on each destination device using Module.clone().
+// 2) manually add a gradient edge pointing from every parameter X in every
+//    module replica to the same parameter X in the original module, using
+//    ReduceAdd as the grad_fn.
+//
+// ReduceAdd can ONLY be used during the backward pass of data parallel. The
+// forward pass cannot use this function, as it does not set up the gradient
+// function and history at all. Do NOT try to use ReduceAdd for any other
+// purposes.
+//
+// NB: An alternative is to add Broadcast and ReduceAddCoalesce to
+// torch/csrc/autograd/functions/comm.cpp as normal autograd functions,
+// implement a Replicatable (like Cloneable) class and add it as a friend class
+// in Module.h. In the forward pass, the Replicatable could use the Broadcast
+// function to replicate every module parameter and set gradient functions
+// using ReduceAddCoalesce (like how it is implemented in Python). However,
+// unlike in Python, where changes to Linear._parameters["weight"] would also
+// apply to Linear.weight (using Linear as an example), Linear.weight and
+// Linear.parameters_["weight"] are two tensor objects pointing to the same
+// TensorImpl. Assigning a new tensor to Linear.parameters_["weight"] will not
+// change Linear.weight. To make this work, we would have to:
+// 1) force every module to also inherit from Replicatable
+// 2) force every module to implement an additional function, e.g.,
+//    Replicatable::load_params(), to pick up changes from parameters_ to their
+//    own member fields.
+// This would be overkill, as Replicatable would only be used in data_parallel,
+// not even in DDP.
+
+// Autograd function for the replicate step in data parallel. This is only used
+// in data parallel, and should not be exposed as a user API.
+struct ReduceAdd : public autograd::Node {
+  explicit ReduceAdd(const at::Device& destination_device)
+      : destination_device_(destination_device) {}
+  ~ReduceAdd() override {}
+
+  autograd::variable_list apply(autograd::variable_list&& inputs) override {
+    TORCH_CHECK(
+        !torch::autograd::compute_requires_grad(inputs),
+        "ReduceAdd can only be used during the backward pass of data parallel.");
+
+    Tensor output = torch::zeros_like(inputs[0], {destination_device_});
+
+    for (auto& input : inputs) {
+      TORCH_CHECK(
+          input.sizes() == inputs[0].sizes(),
+          "All inputs of ReduceAdd must have the same size, but got ",
+          input.sizes(),
+          " and ",
+          inputs[0].sizes());
+
+      TORCH_CHECK(
+          input.dtype() == inputs[0].dtype(),
+          "All inputs of ReduceAdd must have the same dtype, but got ",
+          input.dtype(),
+          " and ",
+          inputs[0].dtype());
+
+      // TODO: use nccl reduce
+      output.add_(input.to(destination_device_));
+    }
+
+    return {output};
+  }
+
+ private:
+  at::Device destination_device_;
+};
+
+} // namespace
+
+// A friend function of `Module`; it recursively sets gradient edges pointing
+// from every parameter X in every module replica to the same parameter X in
+// the original module. See [Replicating Modules]
+template <typename ModuleType>
+void replicate_grad_edges(
+    const std::shared_ptr<ModuleType>& module,
+    const std::vector<std::shared_ptr<ModuleType>>& replicas,
+    const std::vector<Device>& devices) {
+  for (auto& parameter : module->named_parameters(/*recurse=*/false)) {
+    auto grad_fn = std::make_shared<ReduceAdd>((*parameter).device());
+    grad_fn->set_next_edges(autograd::collect_next_edges(*parameter));
+
+    for (const auto i : c10::irange(devices.size())) {
+      autograd::set_history(replicas[i]->parameters_[parameter.key()], grad_fn);
+    }
+  }
+
+  for (auto& buffer : module->named_buffers(/*recurse=*/false)) {
+    if (buffer.value().requires_grad()) {
+      auto grad_fn = std::make_shared<ReduceAdd>((*buffer).device());
+      grad_fn->set_next_edges(autograd::collect_next_edges(*buffer));
+
+      for (const auto i : c10::irange(devices.size())) {
+        autograd::set_history(replicas[i]->buffers_[buffer.key()], grad_fn);
+      }
+    }
+  }
+
+  for (auto& child : module->children_) {
+    std::vector<std::shared_ptr<Module>> child_replicas;
+    child_replicas.reserve(devices.size());
+    for (auto& replica : replicas) {
+      child_replicas.push_back(replica->children_[child.key()]);
+    }
+
+    // recursively set gradient edges for all children
+    replicate_grad_edges(*child, child_replicas, devices);
+  }
+}
+
+namespace parallel {
+
+/// Replicates a module on the given list of devices.
+/// A replica is created by calling `clone()` on the module. For this, the
+/// module must inherit from `nn::Cloneable`, or define its own `clone()`
+/// method, which is expected to perform a deep copy of the module.
+template <typename ModuleType>
+std::vector<std::shared_ptr<ModuleType>> replicate(
+    const std::shared_ptr<ModuleType>& module,
+    const std::vector<Device>& devices) {
+  std::vector<std::shared_ptr<ModuleType>> replicas;
+  replicas.reserve(devices.size());
+  for (const auto& device : devices) {
+    replicas.push_back(
+        std::dynamic_pointer_cast<ModuleType>(module->clone(device)));
+  }
+  // Configure gradient edges to point from replica parameters to original
+  // module parameters. See [Replicating Modules]
+  replicate_grad_edges(module, replicas, devices);
+  return replicas;
+}
+
+/// Replicates a module holder on the given list of devices.
+/// This method allows calling `replicate()` with a module holder, such as
+/// `Linear`.
+template <typename ModuleType>
+std::vector<ModuleHolder<ModuleType>> replicate(
+    const ModuleHolder<ModuleType>& module,
+    const std::vector<Device>& devices) {
+  auto ptrs = replicate(module.ptr(), devices);
+  return std::vector<ModuleHolder<ModuleType>>(ptrs.begin(), ptrs.end());
+}
+
+/// Applies the given inputs to the given modules in a parallel fashion.
+/// Conceptually, a thread is spawned for each `(module, input)` pair, in which
+/// `forward()` is called on the module with its corresponding input. The
+/// outputs of the individual calls are stored in a vector and returned.
+///
+/// The first exception caught by any thread is stashed and rethrown after all
+/// threads have completed their operation.
+///
+/// Further remarks:
+/// 1. The length of the module container must match the length of the inputs.
+/// 2. If a list of devices is supplied, it must match the list of modules in
+/// length. Each device will be set as the current default device during the
+/// invocation of the respective module. This means any tensors allocated on the
+/// default device inside the module will be constructed on this device.
+template <
+    typename ModuleType,
+    typename InputType = Tensor,
+    typename OutputType = Tensor>
+std::vector<OutputType> parallel_apply(
+    std::vector<ModuleType>& modules,
+    const std::vector<InputType>& inputs,
+    const optional<std::vector<Device>>& devices = nullopt) {
+  TORCH_CHECK(
+      modules.size() == inputs.size(), "Must have as many inputs as modules");
+  if (devices) {
+    TORCH_CHECK(
+        modules.size() == devices->size(),
+        "Must have as many devices as modules");
+  }
+
+  std::vector<OutputType> outputs(modules.size());
+  std::mutex mutex;
+
+  // std::exception_ptr can be passed between threads:
+  // > An instance of std::exception_ptr may be passed to another function,
+  // > possibly on another thread, where the exception may be rethrown [...].
+  // https://en.cppreference.com/w/cpp/error/exception_ptr
+  std::exception_ptr exception;
+
+  at::parallel_for(
+      /*begin=*/0,
+      /*end=*/modules.size(),
+      /*grain_size=*/1,
+      [&modules, &inputs, &devices, &outputs, &mutex, &exception](
+          int64_t index, int64_t stop) {
+        for (; index < stop; ++index) {
+          try {
+            auto output = modules[index]->forward(inputs[index]);
+            output =
+                output.to(devices ? (*devices)[index] : inputs[index].device());
+            std::lock_guard<std::mutex> lock(mutex);
+            outputs[index] = output;
+          } catch (...) {
+            std::lock_guard<std::mutex> lock(mutex);
+            if (!exception) {
+              exception = std::current_exception();
+            }
+          }
+        }
+      });
+
+  if (exception) {
+    std::rethrow_exception(exception);
+  }
+
+  return outputs;
+}
+
+/// Evaluates `module(input)` in parallel across the given `devices`. If
+/// `devices` is not supplied, the invocation is parallelized across all
+/// available CUDA devices. If `output_device` is supplied, the final, combined
+/// tensor will be placed on this device. If not, it defaults to the first
+/// device in `devices`.
+///
+/// In detail, this method performs the following four distinct steps:
+/// 1. *Scatter* the input to the given devices,
+/// 2. *Replicate* (deep clone) the model on each device,
+/// 3. *Evaluate* each module with its input on its device,
+/// 4. *Gather* the outputs of each replica into a single output tensor, located
+/// on the `output_device`.
+template <typename ModuleType>
+Tensor data_parallel(
+    ModuleType module,
+    Tensor input,
+    optional<std::vector<Device>> devices = nullopt,
+    optional<Device> output_device = nullopt,
+    int64_t dim = 0) {
+  if (!devices) {
+    const auto device_count = torch::cuda::device_count();
+    TORCH_CHECK(
+        device_count > 0, "Expected at least one CUDA device to be available");
+    devices = std::vector<Device>();
+    devices->reserve(device_count);
+    for (const auto index : c10::irange(device_count)) {
+      devices->emplace_back(kCUDA, static_cast<torch::DeviceIndex>(index));
+    }
+  }
+  if (!output_device) {
+    output_device = devices->front();
+  }
+
+  if (devices->size() == 1) {
+    module->to(devices->front());
+    input = input.to(devices->front());
+    return module->forward(std::move(input)).to(*output_device);
+  }
+
+  autograd::Scatter scatter(*devices, /*chunk_sizes=*/nullopt, dim);
+  auto scattered_inputs = fmap<Tensor>(scatter.apply({std::move(input)}));
+  // The input tensor might not be big enough to be scattered across all
+  // available devices
+  if (scattered_inputs.size() < devices->size()) {
+    devices->resize(
+        scattered_inputs.size(),
+        Device(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES));
+  }
+
+  auto replicas = replicate(module, *devices);
+  auto outputs = parallel_apply(replicas, scattered_inputs, *devices);
+  return autograd::Gather(*output_device, dim)
+      .apply(fmap<autograd::Variable>(std::move(outputs)))
+      .front();
+}
+
+} // namespace parallel
+} // namespace nn
+} // namespace torch
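+
+/// A minimal sketch of invoking `data_parallel` as declared above, assuming
+/// a libtorch build with at least one CUDA device:
+/// ```
+/// #include <torch/torch.h>
+///
+/// int main() {
+///   torch::nn::Linear model(10, 5);
+///   auto input = torch::randn({64, 10});
+///
+///   if (torch::cuda::is_available()) {
+///     // Scatter `input` along dim 0, replicate `model`, run each replica,
+///     // and gather the outputs on the first device.
+///     auto output =
+///         torch::nn::parallel::data_parallel(model, input.to(torch::kCUDA));
+///     // output: (64, 5)
+///   }
+/// }
+/// ```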
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h
new file mode 100644
index 0000000000000000000000000000000000000000..b38e6cf2c0ff729485cf4a27a1ae49818d06c807
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h
@@ -0,0 +1,74 @@
+// This class exists only to do SFINAE on abstract types `T` that are really
+// `ModuleHolder<ModuleType>`, because there's no good way to say that `T` is a
+// `ModuleHolder` over some unknown type `ModuleType`. With this, you can do
+// `enable_if_t<is_module_holder<T>::value>`.
+struct ModuleHolderIndicator {};
+
+// A type trait that is true for types that are `ModuleHolder`s.
+template <typename T>
+using is_module_holder = std::is_base_of<ModuleHolderIndicator, decay_t<T>>;
+
+template <typename T>
+using disable_if_module_holder_t = disable_if_t<is_module_holder<T>::value>;
+
+// A collection of templates that answer the question whether a type `T` is a
+// `ModuleHolder`, and if so whether its contained type is of type `C`. This is
+// tricky because it is hard to short circuit in template metaprogramming. A
+// naive and incorrect solution to this problem would be something like
+// `disable_if<is_module_holder<T>::value && typename T::ContainedType == C>`.
+// This would disable all types that are not `ModuleHolder`s, because even
+// though the `is_module_holder<T>::value` may be `false` for such types, the
+// `T::ContainedType` access would be ill-formed and thus fail the whole
+// expression by the rules of SFINAE. Instead we have to use template
+// specialization to statically branch on the first condition
+// (`is_module_holder<T>`) and are only then allowed to query
+// `T::ContainedType` in the branch for which the condition was true.
+
+// Base template.
+template <bool is_module_holder_value, typename T, typename C>
+struct is_module_holder_of_impl;
+
+// False branch. `T` is not a `ModuleHolder` and thus not a `ModuleHolder` with
+// contained type `C`.
+template <typename T, typename C>
+struct is_module_holder_of_impl<false, T, C> : std::false_type {};
+
+// True branch. `T` is a `ModuleHolder` and thus we can legitimately access its
+// `ContainedType` and compare it against `C`.
+template <typename T, typename C>
+struct is_module_holder_of_impl<true, T, C>
+    : std::is_same<typename T::ContainedType, C> {};
+
+// Helper template.
+template <typename T, typename C>
+struct is_module_holder_of : is_module_holder_of_impl<
+                                 is_module_holder<T>::value,
+                                 decay_t<T>,
+                                 decay_t<C>> {};
+
+// A collection of templates that allow deducing the return type of the
+// `forward()` method, but only if a module actually has a `forward()` method,
+// and otherwise deduces to the type `void`.
+
+template <bool has_forward_value, typename C, typename... Args>
+struct return_type_of_forward_impl;
+
+template <typename C, typename... Args>
+struct return_type_of_forward_impl<true, C, Args...> {
+  using type = decltype(::std::declval<C>().forward(::std::declval<Args>()...));
+};
+
+template <typename C, typename... Args>
+struct return_type_of_forward_impl<false, C, Args...> {
+  using type = void;
+};
+
+template <typename C, typename... Args>
+using return_type_of_forward = return_type_of_forward_impl<
+    torch::detail::has_forward<C>::value,
+    C,
+    Args...>;
+
+template <typename C, typename... Args>
+using return_type_of_forward_t =
+    typename return_type_of_forward<C, Args...>::type;
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl.h
new file mode 100644
index 0000000000000000000000000000000000000000..d66d83c257ebd0061b5fa59e1299dd16ff9badb8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl.h
@@ -0,0 +1,214 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+
+namespace torch {
+namespace detail {
+// Dump all the template metaprogramming in this file.
+#include
+} // namespace detail
+
+namespace nn {
+
+/// A `ModuleHolder` is essentially a wrapper around `std::shared_ptr<M>` where
+/// `M` is an `nn::Module` subclass, with convenient constructors defined for
+/// the kind of constructions we want to allow for our modules.
+template <typename Contained>
+class ModuleHolder : torch::detail::ModuleHolderIndicator {
+ protected:
+  /// The module pointer this class wraps.
+  /// NOTE: Must be placed at the top of the class so that we can use it with
+  /// trailing return types below.
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  std::shared_ptr<Contained> impl_;
+
+ public:
+  using ContainedType = Contained;
+
+  /// Default constructs the contained module if it has a default constructor,
+  /// else produces a static error.
+  ///
+  /// NOTE: This uses the behavior of template
+  /// classes in C++ that constructors (or any methods) are only compiled when
+  /// actually used.
+  ModuleHolder() : impl_(default_construct()) {
+    static_assert(
+        std::is_default_constructible<Contained>::value,
+        "You are trying to default construct a module which has "
+        "no default constructor. Use = nullptr to give it the empty state "
+        "(e.g. `Linear linear = nullptr;` instead of `Linear linear;`).");
+  }
+
+  /// Constructs the `ModuleHolder` with an empty contained value. Access to
+  /// the underlying module is not permitted and will throw an exception, until
+  /// a value is assigned.
+  /* implicit */ ModuleHolder(std::nullptr_t) : impl_(nullptr) {}
+
+  /// Constructs the `ModuleHolder` with a contained module, forwarding all
+  /// arguments to its constructor.
+  template <
+      typename Head,
+      typename... Tail,
+      typename = typename std::enable_if<
+          !(torch::detail::is_module_holder_of<Head, ContainedType>::value &&
+            (sizeof...(Tail) == 0))>::type>
+  explicit ModuleHolder(Head&& head, Tail&&... tail)
+      : impl_(new Contained(
+            std::forward<Head>(head),
+            std::forward<Tail>(tail)...)) {}
+
+  /// Constructs the `ModuleHolder` from a pointer to the contained type.
+  /// Example: `Linear(std::make_shared<LinearImpl>(...))`.
+  /* implicit */ ModuleHolder(std::shared_ptr<Contained> module)
+      : impl_(std::move(module)) {}
+
+  /// Returns true if the `ModuleHolder` contains a module, or false if it is
+  /// `nullptr`.
+  explicit operator bool() const noexcept {
+    return !is_empty();
+  }
+
+  /// Forwards to the contained module.
+  Contained* operator->() {
+    return get();
+  }
+
+  /// Forwards to the contained module.
+  const Contained* operator->() const {
+    return get();
+  }
+
+  /// Returns a reference to the contained module.
+  Contained& operator*() {
+    return *get();
+  }
+
+  /// Returns a const reference to the contained module.
+  const Contained& operator*() const {
+    return *get();
+  }
+
+  /// Returns a shared pointer to the underlying module.
+  const std::shared_ptr<Contained>& ptr() const {
+    TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder");
+    return impl_;
+  }
+
+  /// Returns a pointer to the underlying module.
+  Contained* get() {
+    TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder");
+    return impl_.get();
+  }
+
+  /// Returns a const pointer to the underlying module.
+  const Contained* get() const {
+    TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder");
+    return impl_.get();
+  }
+
+  /// Calls the `forward()` method of the contained module.
+  template <typename... Args>
+  auto operator()(Args&&... args)
+      -> torch::detail::return_type_of_forward_t<Contained, Args...> {
+    // This will not compile if the module does not have a `forward()` method
+    // (as expected).
+    // NOTE: `std::forward` is qualified to prevent VS2017 emitting
+    // error C2872: 'std': ambiguous symbol
+    return impl_->forward(::std::forward<Args>(args)...);
+  }
+
+  /// Forwards to the subscript operator of the contained module.
+  /// NOTE: std::forward is qualified to prevent VS2017 emitting
+  /// error C2872: 'std': ambiguous symbol
+  template <typename Arg>
+  decltype(auto) operator[](Arg&& arg) {
+    return (*impl_)[::std::forward<Arg>(arg)];
+  }
+
+  /// Returns true if the `ModuleHolder` does not contain a module.
+  bool is_empty() const noexcept {
+    return impl_ == nullptr;
+  }
+
+ private:
+  /// In C++17, the two methods below could be written as the following:
+  /// if constexpr (std::is_default_constructible_v<Contained>) {
+  ///   return std::make_shared<Contained>();
+  /// } else {
+  ///   return nullptr;
+  /// }
+  /// In C++11, we use SFINAE instead of `if constexpr`.
+
+  template <
+      typename T = Contained,
+      typename = torch::enable_if_t<std::is_default_constructible<T>::value>>
+  std::shared_ptr<Contained> default_construct() {
+    return std::make_shared<Contained>();
+  }
+
+  template <typename T = Contained>
+  torch::disable_if_t<
+      std::is_default_constructible<T>::value,
+      std::shared_ptr<Contained>>
+  default_construct() {
+    return nullptr;
+  }
+};
+
+/// Pretty prints the given `Module` into the `ostream`.
+template <typename ModuleType>
+std::ostream& operator<<(
+    std::ostream& stream,
+    const nn::ModuleHolder<ModuleType>& module) {
+  return stream << *module;
+}
+
+/// Serializes a `ModuleHolder` into an `OutputArchive`.
+template <typename ModuleType>
+serialize::OutputArchive& operator<<(
+    serialize::OutputArchive& archive,
+    const nn::ModuleHolder<ModuleType>& module) {
+  return archive << module.ptr();
+}
+
+/// Deserializes a `ModuleHolder` from an `InputArchive`.
+template <typename ModuleType>
+serialize::InputArchive& operator>>(
+    serialize::InputArchive& archive,
+    nn::ModuleHolder<ModuleType>& module) {
+  return archive >> module.ptr();
+}
+
+} // namespace nn
+} // namespace torch
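+
+/// A minimal sketch of the `Impl` + `TORCH_MODULE` pattern that the macros
+/// below support (`ScaleImpl`/`Scale` are illustrative names, not part of
+/// the library):
+/// ```
+/// #include <torch/torch.h>
+///
+/// struct ScaleImpl : torch::nn::Module {
+///   explicit ScaleImpl(double factor) : factor(factor) {}
+///   torch::Tensor forward(const torch::Tensor& input) {
+///     return input * factor;
+///   }
+///   double factor;
+/// };
+/// TORCH_MODULE(Scale); // `Scale` is a ModuleHolder<ScaleImpl>
+///
+/// int main() {
+///   Scale scale(0.5);
+///   auto y = scale(torch::ones({2, 2})); // forwards to ScaleImpl::forward
+///   Scale empty = nullptr;               // empty holder; access would throw
+/// }
+/// ```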
+
+// Workaround for CUDA 10.2 and below not allowing attribute unused on
+// using declarations.
+#ifdef __CUDACC__
+#define TORCH_UNUSED_EXCEPT_CUDA
+#else
+#define TORCH_UNUSED_EXCEPT_CUDA C10_UNUSED
+#endif
+
+/// Defines a class `Name` which inherits from `nn::ModuleHolder` to provide a
+/// wrapper over a `std::shared_ptr<ImplType>`.
+/// `Impl` is a type alias for `ImplType` which provides a way to call static
+/// methods of `ImplType`.
+#define TORCH_MODULE_IMPL(Name, ImplType)                              \
+  class Name : public torch::nn::ModuleHolder<ImplType> { /* NOLINT */ \
+   public:                                                             \
+    using torch::nn::ModuleHolder<ImplType>::ModuleHolder;             \
+    using Impl TORCH_UNUSED_EXCEPT_CUDA = ImplType;                    \
+  }
+
+/// Like `TORCH_MODULE_IMPL`, but defaults the `ImplType` name to `<Name>Impl`.
+#define TORCH_MODULE(Name) TORCH_MODULE_IMPL(Name, Name##Impl)
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..8dbfaf5126e4f3db94174937432ea4b017354ab7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils.h
@@ -0,0 +1,5 @@
+#pragma once
+
+#include
+#include
+#include
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/clip_grad.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/clip_grad.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1023bd1eb5c7b88895c6dc5b349d3b4a976f226
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/clip_grad.h
@@ -0,0 +1,147 @@
+#pragma once
+
+#include
+
+#include
+
+namespace torch {
+namespace nn {
+namespace utils {
+
+// Clips the gradient norm of a vector of Tensors.
+// See
+// https://pytorch.org/docs/stable/nn.html?highlight=clip_grad_norm#torch.nn.utils.clip_grad_norm_
+// for more details about this module.
+//
+// Difference from the Python version: unlike the Python version, even when
+// skipping the finiteness checks (error_if_nonfinite = false), this function
+// will introduce a device <=> CPU synchronization (for devices where that
+// makes sense!) in order to return a CPU-side `double`. This C++ version
+// therefore cannot be run fully asynchronously w.r.t. the device of the
+// gradients.
+inline double clip_grad_norm_(
+    const std::vector<Tensor>& parameters,
+    double max_norm,
+    double norm_type = 2.0,
+    bool error_if_nonfinite = false) {
+  std::vector<Tensor> params_with_grad;
+
+  for (const auto& param : parameters) {
+    auto& grad = param.grad();
+    if (grad.defined()) {
+      params_with_grad.push_back(param);
+    }
+  }
+
+  if (params_with_grad.empty()) {
+    return 0.0;
+  }
+
+  Tensor total_norm_tensor;
+  if (norm_type == std::numeric_limits<double>::infinity()) {
+    std::vector<Tensor> norms;
+    norms.reserve(params_with_grad.size());
+
+    for (const auto& param : params_with_grad) {
+      norms.emplace_back(param.grad().data().abs().max());
+    }
+    total_norm_tensor =
+        (norms.size() == 1) ? norms[0] : torch::max(torch::stack(norms));
+  } else if (norm_type == 0) {
+    total_norm_tensor =
+        torch::full({}, static_cast<double>(params_with_grad.size()));
+  } else {
+    std::vector<Tensor> norms;
+    norms.reserve(params_with_grad.size());
+
+    for (const auto& param : params_with_grad) {
+      norms.emplace_back(param.grad().data().norm(norm_type));
+    }
+    total_norm_tensor =
+        (norms.size() == 1) ? norms[0] : torch::stack(norms).norm(norm_type);
+  }
+
+  // When possible (i.e. when skipping the finiteness check), we avoid
+  // synchronizing the CPU and the gradients' device until the very end to
+  // preserve async execution on the device. When checking for finite-ness,
+  // this optional ensures we only sync once.
+  c10::optional<double> total_norm = c10::nullopt;
+  if (error_if_nonfinite) {
+    total_norm = total_norm_tensor.item().toDouble();
+    TORCH_CHECK(
+        std::isfinite(*total_norm),
+        "The total norm of order ",
+        norm_type,
+        " for gradients from `parameters` ",
+        "is non-finite, so it cannot be clipped. To disable this error and scale ",
+        "the gradients with the non-finite norm anyway, set ",
+        "`error_if_nonfinite=false`");
+  }
+
+  auto clip_coef = max_norm / (total_norm_tensor + 1e-6);
+  auto clip_coef_clamped =
+      torch::clamp(clip_coef, c10::nullopt /* min */, 1.0 /* max */);
+  for (auto& param : params_with_grad) {
+    param.grad().data().mul_(clip_coef_clamped);
+  }
+
+  if (!total_norm.has_value()) {
+    total_norm = total_norm_tensor.item().toDouble();
+  }
+  return *total_norm;
+}
+
+// A wrapper around clip_grad_norm_ that allows us to call the function with a
+// braced-init-list of Tensors.
+inline double clip_grad_norm_(
+    std::initializer_list<Tensor> parameters,
+    double max_norm,
+    double norm_type = 2.0,
+    bool error_if_nonfinite = false) {
+  return clip_grad_norm_(
+      std::vector<Tensor>(parameters), max_norm, norm_type, error_if_nonfinite);
+}
+
+// A wrapper around clip_grad_norm_ that allows us to call the function with a
+// single Tensor.
+inline double clip_grad_norm_(
+    Tensor parameter,
+    double max_norm,
+    double norm_type = 2.0,
+    bool error_if_nonfinite = false) {
+  std::vector<Tensor> params = {std::move(parameter)};
+  return clip_grad_norm_(
+      std::move(params), max_norm, norm_type, error_if_nonfinite);
+}
+
+// Clips the gradients of an iterable of parameters at the specified value.
+// Gradients are modified in-place.
+// See https://pytorch.org/docs/stable/nn.html#clip-grad-value
+// for more details about this module.
+inline void clip_grad_value_(
+    const std::vector<Tensor>& parameters,
+    double clip_value) {
+  for (const auto& param : parameters) {
+    if (param.grad().defined()) {
+      param.grad().data().clamp_(-clip_value, clip_value);
+    }
+  }
+}
+
+// A wrapper around clip_grad_value_ that allows us to call the function with a
+// braced-init-list of Tensors.
+inline void clip_grad_value_(
+    std::initializer_list<Tensor> parameters,
+    double clip_value) {
+  clip_grad_value_(std::vector<Tensor>(parameters), clip_value);
+}
+
+// A wrapper around clip_grad_value_ that allows us to call the function with a
+// single Tensor.
+inline void clip_grad_value_(Tensor parameter, double clip_value) {
+  std::vector<Tensor> params = {std::move(parameter)};
+  clip_grad_value_(std::move(params), clip_value);
+}
+
+} // namespace utils
+} // namespace nn
+} // namespace torch
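+
+/// A minimal sketch of the clipping helpers above, assuming a standard
+/// libtorch build:
+/// ```
+/// #include <torch/torch.h>
+///
+/// int main() {
+///   torch::nn::Linear linear(10, 10);
+///   auto loss = linear(torch::randn({4, 10})).pow(2).sum();
+///   loss.backward();
+///
+///   // Rescale all gradients in place so their total 2-norm is at most 1.0;
+///   // returns the pre-clipping norm.
+///   double total_norm = torch::nn::utils::clip_grad_norm_(
+///       linear->parameters(), /*max_norm=*/1.0);
+///
+///   // Or clamp each gradient element to [-0.5, 0.5].
+///   torch::nn::utils::clip_grad_value_(
+///       linear->parameters(), /*clip_value=*/0.5);
+/// }
+/// ```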
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/convert_parameters.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/convert_parameters.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ac1d317c99223ae9dd8c2ca27ef7d7a20e03108
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/convert_parameters.h
@@ -0,0 +1,82 @@
+#pragma once
+
+#include
+#include
+
+namespace torch {
+namespace nn {
+namespace utils {
+
+// This helper function checks whether the parameters are located on the same
+// device. Currently, the conversion between model parameters and a single
+// vector form is not supported for multiple allocations, e.g. parameters in
+// different GPUs, or a mixture of CPU/GPU.
+inline c10::optional<int64_t> _check_param_device(
+    const torch::Tensor& param,
+    c10::optional<int64_t> old_param_device) {
+  // First parameter we encounter
+  if (old_param_device == c10::nullopt) {
+    old_param_device = param.is_cuda() ? param.get_device() : -1;
+  } else {
+    bool warn = false;
+    if (param.is_cuda()) { // Check if in same GPU
+      warn = (param.get_device() != old_param_device.value());
+    } else { // Check if in CPU
+      warn = (old_param_device.value() != -1);
+    }
+    if (warn) {
+      TORCH_CHECK(
+          false,
+          "Found two parameters on different devices, ",
+          "this is currently not supported.");
+    }
+  }
+
+  return old_param_device;
+}
+
+// Convert parameters to one vector
+inline torch::Tensor parameters_to_vector(
+    const std::vector<torch::Tensor>& parameters) {
+  c10::optional<int64_t> param_device;
+
+  std::vector<torch::Tensor> vec;
+  vec.reserve(parameters.size());
+
+  for (const torch::Tensor& param : parameters) {
+    // Ensure the parameters are located on the same device
+    param_device = _check_param_device(param, param_device);
+
+    vec.push_back(param.view(-1));
+  }
+
+  return torch::cat(vec);
+}
+
+// Convert one vector to the parameters
+inline void vector_to_parameters(
+    const torch::Tensor& vec,
+    const std::vector<torch::Tensor>& parameters) {
+  // Flag for the device where the parameter is located
+  c10::optional<int64_t> param_device;
+
+  // Pointer for slicing the vector for each parameter
+  int64_t pointer = 0;
+  for (const torch::Tensor& param : parameters) {
+    // Ensure the parameters are located on the same device
+    param_device = _check_param_device(param, param_device);
+
+    // The length of the parameter
+    auto num_param = param.numel();
+    // Slice the vector, reshape it, and replace the old data of the parameter
+    param.set_data(
+        vec.slice(0, pointer, pointer + num_param).view_as(param).data());
+
+    // Increment the pointer
+    pointer += num_param;
+  }
+}
+
+} // namespace utils
+} // namespace nn
+} // namespace torch
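+
+/// A minimal round-trip sketch for the two conversion helpers above,
+/// assuming a standard libtorch build:
+/// ```
+/// #include <torch/torch.h>
+///
+/// int main() {
+///   torch::nn::Linear linear(3, 2);
+///
+///   // Flatten all parameters into one 1-D vector (useful for optimizers
+///   // that work on a single parameter vector), then write back a copy.
+///   auto flat = torch::nn::utils::parameters_to_vector(linear->parameters());
+///   torch::nn::utils::vector_to_parameters(
+///       flat * 0.9, linear->parameters());
+/// }
+/// ```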
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/rnn.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/rnn.h
new file mode 100644
index 0000000000000000000000000000000000000000..eea517a2b60f371f21aaa48e06d5496938804c14
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/rnn.h
@@ -0,0 +1,351 @@
+#pragma once
+
+#include
+#include
+
+#include
+
+namespace torch {
+namespace nn {
+namespace utils {
+namespace rnn {
+
+inline Tensor invert_permutation(const Tensor& permutation) {
+  if (!permutation.defined()) {
+    return torch::Tensor();
+  }
+  Tensor output =
+      torch::empty_like(permutation, torch::MemoryFormat::Contiguous);
+  output.scatter_(
+      0,
+      permutation,
+      torch::arange(0, permutation.numel(), permutation.device()));
+  return output;
+}
+
+/// Holds the data and list of `batch_sizes` of a packed sequence.
+///
+/// All RNN modules accept packed sequences as inputs.
+///
+/// Note:
+///     Instances of this class should never be created manually. They are
+///     meant to be instantiated by functions like `pack_padded_sequence`.
+///
+/// Batch sizes represent the number of elements at each sequence step in
+/// the batch, not the varying sequence lengths passed to
+/// `pack_padded_sequence`. For instance, given data ``abc`` and ``x``, the
+/// :class:`PackedSequence` would contain data ``axbc`` with
+/// ``batch_sizes=[2,1,1]``.
+///
+/// Attributes:
+///     data (Tensor): Tensor containing the packed sequence.
+///     batch_sizes (Tensor): Tensor of integers holding information about
+///       the batch size at each sequence step.
+///     sorted_indices (Tensor, optional): Tensor of integers holding how this
+///       :class:`PackedSequence` is constructed from sequences.
+///     unsorted_indices (Tensor, optional): Tensor of integers holding how to
+///       recover the original sequences with correct order.
+///
+/// .. note::
+///     `data` can be on arbitrary device and of arbitrary dtype.
+///     `sorted_indices` and `unsorted_indices` must be ``torch::kInt64``
+///     tensors on the same device as `data`.
+///
+///     However, `batch_sizes` should always be a CPU ``torch::kInt64`` tensor.
+///
+///     This invariant is maintained throughout the `PackedSequence` class,
+///     and by all functions that construct a `PackedSequence` in libtorch
+///     (i.e., they only pass in tensors conforming to this constraint).
+class PackedSequence {
+ public:
+  explicit PackedSequence(
+      Tensor data,
+      Tensor batch_sizes,
+      Tensor sorted_indices = {},
+      Tensor unsorted_indices = {}) {
+    // NB: if unsorted_indices is provided, it should be the inverse permutation
+    // to sorted_indices. Don't assert it here because the PackedSequence ctor
+    // should only be used internally.
+    if (!unsorted_indices.defined()) {
+      unsorted_indices = invert_permutation(sorted_indices);
+    }
+    TORCH_CHECK(
+        batch_sizes.device().type() == kCPU,
+        "batch_sizes should always be on CPU. "
+        "Instances of PackedSequence should never be created manually. "
+        "They should be instantiated by functions like pack_sequence "
+        "and pack_padded_sequence in nn::utils::rnn. "
+        "https://pytorch.org/docs/stable/nn.html#torch.nn.utils.rnn.pack_sequence");
+    data_ = std::move(data);
+    batch_sizes_ = std::move(batch_sizes);
+    sorted_indices_ = std::move(sorted_indices);
+    unsorted_indices_ = std::move(unsorted_indices);
+  }
+
+  const Tensor& data() const {
+    return data_;
+  }
+
+  const Tensor& batch_sizes() const {
+    return batch_sizes_;
+  }
+
+  const Tensor& sorted_indices() const {
+    return sorted_indices_;
+  }
+
+  const Tensor& unsorted_indices() const {
+    return unsorted_indices_;
+  }
+
+  PackedSequence pin_memory() const {
+    // Why not convert `batch_sizes`?
+    // See NOTE [ device and dtype of a PackedSequence ]
+    return PackedSequence(
+        data_.pin_memory(),
+        batch_sizes_,
+        sorted_indices_.defined() ? sorted_indices_.pin_memory() : Tensor(),
+        unsorted_indices_.defined() ? unsorted_indices_.pin_memory()
+                                    : Tensor());
+  }
+
+  PackedSequence to(TensorOptions options) const {
+    // Performs dtype and/or device conversion on `data_`.
+    //
+    // If the ``data_`` Tensor already has the correct `torch::Dtype`
+    // and `torch::Device`, then ``self`` is returned.
+    // Otherwise, returns a copy with the desired configuration.
+
+    // Why not convert `batch_sizes`?
+    // See NOTE [ device and dtype of a PackedSequence ]
+    Tensor data = data_.to(options);
+    if (data.is_same(data_)) {
+      return *this;
+    } else {
+      // Does not forward device or dtype args; the device is set from
+      // data.device().
+      Tensor sorted_indices = sorted_indices_.defined()
+          ? sorted_indices_.to(
+                options.device(data.device()).dtype(sorted_indices_.dtype()))
+          : Tensor();
+      Tensor unsorted_indices = unsorted_indices_.defined()
+          ? unsorted_indices_.to(
+                options.device(data.device()).dtype(unsorted_indices_.dtype()))
+          : Tensor();
+      return PackedSequence(
+          std::move(data),
+          batch_sizes_,
+          std::move(sorted_indices),
+          std::move(unsorted_indices));
+    }
+  }
+
+  PackedSequence cuda() const {
+    return to(kCUDA);
+  }
+
+  PackedSequence cpu() const {
+    return to(kCPU);
+  }
+
+  /// Returns true if `data_` is stored on a GPU.
+  bool is_cuda() const {
+    return data_.is_cuda();
+  }
+
+  /// Returns true if `data_` is stored in pinned memory.
+  bool is_pinned() const {
+    return data_.is_pinned();
+  }
+
+ private:
+  Tensor data_;
+  Tensor batch_sizes_;
+  Tensor sorted_indices_;
+  Tensor unsorted_indices_;
+};
+
+/// Packs a Tensor containing padded sequences of variable length.
+///
+/// `input` can be of size ``T x B x *`` where `T` is the length of the
+/// longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and
+/// ``*`` is any number of dimensions (including 0). If ``batch_first`` is
+/// ``true``, ``B x T x *`` `input` is expected.
+///
+/// For unsorted sequences, use `enforce_sorted = false`. If `enforce_sorted` is
+/// ``true``, the sequences should be sorted by length in decreasing order,
+/// i.e. ``input[:,0]`` should be the longest sequence, and ``input[:,B-1]``
+/// the shortest one.
+///
+/// Note:
+///     This function accepts any input that has at least two dimensions. You
+///     can apply it to pack the labels, and use the output of the RNN with
+///     them to compute the loss directly. A Tensor can be retrieved from
+///     a `PackedSequence` object by calling its ``.data()`` function.
+///
+/// Arguments:
+///     input (Tensor): padded batch of variable length sequences.
+///     lengths (Tensor): list of sequence lengths of each batch element.
+///     batch_first (bool, optional): if ``true``, the input is expected in
+///       ``B x T x *`` format. Default: ``false``.
+///     enforce_sorted (bool, optional): if ``true``, the input is expected to
+///       contain sequences sorted by length in decreasing order. If
+///       ``false``, this condition is not checked. Default: ``true``.
+///
+/// Returns:
+///     a `PackedSequence` object
+inline PackedSequence pack_padded_sequence(
+    Tensor input,
+    Tensor lengths,
+    bool batch_first = false,
+    bool enforce_sorted = true) {
+  lengths = lengths.to(kInt64);
+  Tensor sorted_indices;
+  if (enforce_sorted) {
+    sorted_indices = Tensor();
+  } else {
+    std::tie(lengths, sorted_indices) =
+        torch::sort(lengths, /*dim=*/-1, /*descending=*/true);
+    sorted_indices = sorted_indices.to(input.device());
+    int64_t batch_dim = batch_first ? 0 : 1;
+    input = input.index_select(batch_dim, sorted_indices);
+  }
+
+  auto [data, batch_sizes] =
+      torch::_pack_padded_sequence(input, lengths, batch_first);
+  return PackedSequence(
+      std::move(data), std::move(batch_sizes), std::move(sorted_indices), {});
+}
+
+/// Pads a packed batch of variable length sequences.
+///
+/// It is an inverse operation to `pack_padded_sequence`.
+///
+/// The returned Tensor's data will be of size ``T x B x *``, where `T` is the
+/// length of the longest sequence and `B` is the batch size. If ``batch_first``
+/// is true, the data will be transposed into ``B x T x *`` format.
+///
+/// Batch elements will be ordered decreasingly by their length.
+///
+/// Arguments:
+///     sequence (PackedSequence): batch to pad.
+///     batch_first (bool, optional): if ``true``, the output will be in
+///       ``B x T x *`` format.
+///     padding_value (double, optional): values for padded elements.
+
+/// Pads a packed batch of variable length sequences.
+///
+/// It is the inverse operation of `pack_padded_sequence`.
+///
+/// The returned Tensor's data will be of size ``T x B x *``, where `T` is the
+/// length of the longest sequence and `B` is the batch size. If
+/// ``batch_first`` is ``true``, the data will be transposed into
+/// ``B x T x *`` format.
+///
+/// Batch elements will be ordered in decreasing order of their length.
+///
+/// Arguments:
+///     sequence (PackedSequence): batch to pad.
+///     batch_first (bool, optional): if ``true``, the output will be in
+///         ``B x T x *`` format. Default: ``false``.
+///     padding_value (double, optional): value for padded elements.
+///         Default: 0.0.
+///     total_length (int64_t, optional): if specified, the output will be
+///         padded to have length `total_length`. This function will throw an
+///         error if `total_length` is less than the max sequence length in
+///         `sequence`.
+///
+/// Returns:
+///     A tuple of the Tensor containing the padded sequence, and a Tensor
+///     containing the lengths of each sequence in the batch.
+inline std::tuple<Tensor, Tensor> pad_packed_sequence(
+    PackedSequence sequence,
+    bool batch_first = false,
+    double padding_value = 0.0,
+    c10::optional<int64_t> total_length = torch::nullopt) {
+  int64_t max_seq_length = sequence.batch_sizes().size(0);
+  if (total_length.has_value()) {
+    int64_t total_length_val = total_length.value();
+    TORCH_CHECK(
+        total_length_val >= max_seq_length,
+        "Expected total_length to be at least the length "
+        "of the longest sequence in input, but got "
+        "total_length=",
+        total_length_val,
+        " and max sequence length being ",
+        max_seq_length);
+    max_seq_length = total_length_val;
+  }
+  auto [padded_output, lengths] = torch::_pad_packed_sequence(
+      sequence.data(),
+      sequence.batch_sizes(),
+      batch_first,
+      padding_value,
+      max_seq_length);
+  const Tensor& unsorted_indices = sequence.unsorted_indices();
+  if (unsorted_indices.defined()) {
+    int64_t batch_dim = batch_first ? 0 : 1;
+    return std::make_tuple(
+        padded_output.index_select(batch_dim, unsorted_indices),
+        lengths.index({unsorted_indices.cpu()}));
+  }
+  return std::make_tuple(padded_output, lengths);
+}
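+
+// Example (editor's illustrative round trip through an LSTM; a sketch only,
+// assuming a standard libtorch build; all sizes and values are made up):
+//
+//   namespace rnn = torch::nn::utils::rnn;
+//   torch::nn::LSTM lstm(
+//       torch::nn::LSTMOptions(/*input_size=*/5, /*hidden_size=*/8));
+//   auto batch = torch::randn({4, 3, 5});     // T x B x input_size
+//   auto lengths = torch::tensor({4, 3, 2});  // sorted in decreasing order
+//   auto packed = rnn::pack_padded_sequence(batch, lengths);
+//   auto [packed_out, state] = lstm->forward_with_packed_input(packed);
+//   auto [padded_out, out_lengths] = rnn::pad_packed_sequence(packed_out);
+//   // padded_out has shape 4 x 3 x 8; out_lengths is {4, 3, 2} on CPU.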
+
+/// Pads a list of variable length Tensors with ``padding_value``.
+///
+/// ``pad_sequence`` stacks a list of Tensors along a new dimension, and pads
+/// them to equal length. For example, if the input is a list of sequences
+/// with size ``L x *``, the output is of size ``T x B x *`` if
+/// ``batch_first`` is ``false``, and ``B x T x *`` otherwise.
+///
+/// `B` is the batch size; it is equal to the number of elements in
+/// ``sequences``. `T` is the length of the longest sequence. `L` is the
+/// length of a given sequence. `*` is any number of trailing dimensions,
+/// including none.
+///
+/// Note:
+///     This function returns a Tensor of size ``T x B x *`` or ``B x T x *``
+///     where `T` is the length of the longest sequence. It assumes that the
+///     trailing dimensions and the type of all the Tensors in ``sequences``
+///     are the same.
+///
+/// Arguments:
+///     sequences (torch::ArrayRef<Tensor>): list of variable length
+///         sequences.
+///     batch_first (bool, optional): output will be in ``B x T x *`` format
+///         if ``true``, or in ``T x B x *`` format otherwise.
+///         Default: ``false``.
+///     padding_value (double, optional): value for padded elements.
+///         Default: 0.
+///
+/// Returns:
+///     Tensor of size ``T x B x *`` if `batch_first` is ``false``,
+///     Tensor of size ``B x T x *`` otherwise.
+inline Tensor pad_sequence(
+    ArrayRef<Tensor> sequences,
+    bool batch_first = false,
+    double padding_value = 0) {
+  return at::pad_sequence(sequences, batch_first, padding_value);
+}
+
+/// Packs a list of variable length Tensors.
+///
+/// ``sequences`` should be a list of Tensors of size ``L x *``, where `L` is
+/// the length of a sequence and `*` is any number of trailing dimensions,
+/// including zero.
+///
+/// For unsorted sequences, use `enforce_sorted = false`. If
+/// ``enforce_sorted`` is ``true``, the sequences should be sorted in the
+/// order of decreasing length.
+///
+/// Arguments:
+///     sequences (torch::ArrayRef<Tensor>): a list of sequences of
+///         decreasing length.
+///     enforce_sorted (bool, optional): if ``true``, checks that the input
+///         contains sequences sorted by length in decreasing order. If
+///         ``false``, this condition is not checked. Default: ``true``.
+///
+/// Returns:
+///     a `PackedSequence` object
+inline PackedSequence pack_sequence(
+    ArrayRef<Tensor> sequences,
+    bool enforce_sorted = true) {
+  // Collect the length of each sequence into a CPU kInt64 tensor, then pad
+  // and pack in one go.
+  Tensor lengths = torch::empty({(int64_t)sequences.size()}, kInt64);
+  for (const auto i : c10::irange(sequences.size())) {
+    lengths[i] = sequences[i].size(0);
+  }
+  return pack_padded_sequence(
+      at::pad_sequence(sequences),
+      std::move(lengths),
+      /*batch_first=*/false,
+      /*enforce_sorted=*/enforce_sorted);
+}
+
+} // namespace rnn
+} // namespace utils
+} // namespace nn
+} // namespace torch
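+
+// Example usage of pad_sequence / pack_sequence (editor's illustrative
+// sketch, not part of the upstream header):
+//
+//   namespace rnn = torch::nn::utils::rnn;
+//   auto a = torch::ones({3, 4});
+//   auto b = torch::ones({2, 4});
+//   auto c = torch::ones({1, 4});
+//   // Stack and pad to T x B x * = 3 x 3 x 4 (batch_first = false).
+//   auto padded = rnn::pad_sequence({a, b, c});
+//   // Pack the same list directly; equivalent to pad_sequence followed by
+//   // pack_padded_sequence with lengths {3, 2, 1}.
+//   rnn::PackedSequence packed = rnn::pack_sequence({a, b, c});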