applied-ai-018 committed on
Commit 3202bc1 · verified · 1 Parent(s): 3e63766

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader.h +57 -0
  6. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/base.h +255 -0
  7. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateful.h +65 -0
  8. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateless.h +82 -0
  9. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets.h +9 -0
  10. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/map.h +118 -0
  11. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/mnist.h +48 -0
  12. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/shared.h +83 -0
  13. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/stateful.h +70 -0
  14. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/base.h +47 -0
  15. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/custom_batch_request.h +21 -0
  16. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/distributed.h +139 -0
  17. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/random.h +54 -0
  18. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/sequential.h +50 -0
  19. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/serialize.h +28 -0
  20. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/stream.h +63 -0
  21. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/stack.h +49 -0
  22. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h +98 -0
  23. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h +17 -0
  24. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h +966 -0
  25. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h +83 -0
  26. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/conv.h +301 -0
  27. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h +88 -0
  28. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h +234 -0
  29. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h +211 -0
  30. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h +102 -0
  31. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h +63 -0
  32. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h +37 -0
  33. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h +1044 -0
  34. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h +211 -0
  35. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/padding.h +58 -0
  36. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h +47 -0
  37. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h +1153 -0
  38. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/upsampling.h +289 -0
  39. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h +124 -0
  40. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h +124 -0
  41. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h +702 -0
  42. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h +36 -0
  43. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/_functions.h +26 -0
  44. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/activation.h +875 -0
  45. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/adaptive.h +109 -0
  46. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/batchnorm.h +250 -0
  47. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/common.h +97 -0
  48. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any.h +372 -0
  49. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_value.h +124 -0
  50. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/functional.h +105 -0
ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:178341ac824177eceae2cde0ed6b39d6b7ac5888b8422cb06183e4d3b875f09b
+ size 16778396
ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cafda7e1f668df1c974c23ba2e76babb4c8fb21858726e99a6486cf5faed8ac
+ size 33555533
ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:552aa8ecaf09955656d67ebcbbf5a85b3f0ec86a736176c6ff1e072f5e84d7a8
+ size 33555627
ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f9b340505ee4aaf7d2fcd6a8d81f8888a21c7ba3725c432b4377c0af8b5847d
+ size 33555533
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader.h ADDED
@@ -0,0 +1,57 @@
+ #pragma once
+
+ #include <torch/data/dataloader/stateful.h>
+ #include <torch/data/dataloader/stateless.h>
+
+ #include <torch/csrc/utils/variadic.h>
+
+ #include <c10/util/Exception.h>
+
+ #include <cstddef>
+ #include <memory>
+ #include <type_traits>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+
+ /// Creates a `DataLoader` instance for a stateless `dataset`, a `sampler` and
+ /// some `options`.
+ template <typename Dataset, typename Sampler>
+ torch::disable_if_t<
+ Dataset::is_stateful,
+ std::unique_ptr<StatelessDataLoader<Dataset, Sampler>>>
+ make_data_loader(Dataset dataset, Sampler sampler, DataLoaderOptions options) {
+ return std::make_unique<StatelessDataLoader<Dataset, Sampler>>(
+ std::move(dataset), std::move(sampler), std::move(options));
+ }
+
+ /// Creates a `DataLoader` instance for a stateless `dataset` and some
+ /// `options`. A sampler (by default a `RandomSampler`) will be constructed from
+ /// the size of the dataset.
+ template <typename Sampler = samplers::RandomSampler, typename Dataset>
+ torch::disable_if_t<
+ Dataset::is_stateful || !std::is_constructible<Sampler, size_t>::value,
+ std::unique_ptr<StatelessDataLoader<Dataset, Sampler>>>
+ make_data_loader(
+ Dataset dataset,
+ DataLoaderOptions options = DataLoaderOptions()) {
+ const optional<size_t> size = dataset.size();
+ TORCH_CHECK(
+ size.has_value(),
+ "Expected the dataset to be sized in "
+ "order to construct the Sampler");
+ return make_data_loader(
+ std::move(dataset), Sampler(*size), std::move(options));
+ }
+
+ /// Creates a `DataLoader` for a stateful `dataset` and some `options`.
+ template <typename Dataset, typename = torch::enable_if_t<Dataset::is_stateful>>
+ std::unique_ptr<StatefulDataLoader<Dataset>> make_data_loader(
+ Dataset dataset,
+ DataLoaderOptions options = DataLoaderOptions()) {
+ return std::make_unique<StatefulDataLoader<Dataset>>(
+ std::move(dataset), std::move(options));
+ }
+ } // namespace data
+ } // namespace torch
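A brief usage sketch for these overloads (not part of the commit; the data path and option values are illustrative, and MNIST/Stack come from the dataset and transform headers added later in this diff):

#include <torch/torch.h>

void train_loop() {
  // Second overload above: a RandomSampler is constructed from dataset.size().
  auto dataset = torch::data::datasets::MNIST("./mnist")
                     .map(torch::data::transforms::Stack<>());
  auto loader = torch::data::make_data_loader(
      std::move(dataset),
      torch::data::DataLoaderOptions().batch_size(64).workers(2));
  for (torch::data::Example<>& batch : *loader) {
    (void)batch;  // batch.data: [64, 1, 28, 28], batch.target: [64]
  }
}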
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/base.h ADDED
@@ -0,0 +1,255 @@
+ #pragma once
+
+ #include <torch/data/dataloader_options.h>
+ #include <torch/data/detail/data_shuttle.h>
+ #include <torch/data/detail/sequencers.h>
+ #include <torch/data/iterator.h>
+ #include <torch/data/samplers/random.h>
+ #include <torch/data/worker_exception.h>
+ #include <torch/types.h>
+
+ #include <torch/csrc/utils/variadic.h>
+
+ #include <c10/util/Exception.h>
+ #include <c10/util/irange.h>
+
+ #include <cstddef>
+ #include <exception>
+ #include <memory>
+ #include <thread>
+ #include <type_traits>
+ #include <utility>
+ #include <vector>
+
+ namespace torch {
+ namespace data {
+ template <typename Dataset, typename Batch, typename BatchRequest>
+ class DataLoaderBase {
+ public:
+ using BatchType = Batch;
+ using BatchRequestType = BatchRequest;
+
+ /// Constructs a new DataLoader from a `dataset` to sample from, `options`
+ /// to configure the DataLoader with, and a `sampler` that specifies the
+ /// sampling strategy.
+ DataLoaderBase(
+ DataLoaderOptions options,
+ std::unique_ptr<Dataset> main_thread_dataset = nullptr)
+ : options_(std::move(options)),
+ main_thread_dataset_(std::move(main_thread_dataset)),
+ sequencer_(new_sequencer()) {}
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+ virtual ~DataLoaderBase() {
+ join();
+ }
+
+ /// Returns an iterator into the DataLoader. The lifetime of the iterator is
+ /// bound to the DataLoader. In C++ standards language, the category of the
+ /// iterator is `OutputIterator`. See
+ /// https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
+ /// means. In short: you may increment the iterator and dereference it, but
+ /// cannot go back, or step forward more than one position at a time. When the
+ /// DataLoader is exhausted, it will compare equal with the special
+ /// "sentinel" iterator returned by `DataLoader::end()`. Most of the time, you
+ /// should only use range-for loops to loop over the DataLoader, but
+ /// standard algorithms like `std::copy(dataloader.begin(), dataloader.end(),
+ /// output_iterator)` are supported too.
+ Iterator<Batch> begin() {
+ TORCH_CHECK(
+ shuttle_.in_flight_jobs() == 0,
+ "Attempted to get a new DataLoader iterator "
+ "while another iterator is not yet exhausted");
+ reset();
+ return Iterator<Batch>(std::make_unique<detail::ValidIterator<Batch>>(
+ [this] { return this->next(); }));
+ }
+
+ /// Returns a special "sentinel" iterator that compares equal with a
+ /// non-sentinel iterator once the DataLoader is exhausted.
+ Iterator<Batch> end() {
+ return Iterator<Batch>(std::make_unique<detail::SentinelIterator<Batch>>());
+ }
+
+ /// Joins the DataLoader's worker threads and drains internal queues.
+ /// This function may only be invoked from the main thread (in which the
+ /// DataLoader lives).
+ void join() {
+ if (joined_) {
+ return;
+ }
+ shuttle_.drain();
+ // Send one 'quit' message per worker. Since a worker dies (exits its
+ // thread) after receiving this message, each `QuitWorker()` message will be
+ // read by exactly one worker.
+ for (const auto w : c10::irange(options_.workers)) {
+ (void)w; // Suppress unused variable warning
+ push_job(QuitWorker());
+ }
+ for (auto& worker : workers_) {
+ worker.join();
+ }
+ joined_ = true;
+ }
+
+ /// Returns the options with which the DataLoader was configured.
+ const FullDataLoaderOptions& options() const noexcept {
+ return options_;
+ }
+
+ protected:
+ /// Simple mix-in to give something a sequence number.
+ struct Sequenced {
+ Sequenced() = default;
+ Sequenced(size_t sqn) : sequence_number(sqn) {}
+ size_t sequence_number;
+ };
+
+ struct QuitWorker {};
+
+ /// A `Job` is either a `BatchRequest` (new indices to fetch data at) or a
+ /// `QuitWorker` object, to indicate the worker should shut down.
+ struct Job : Sequenced {
+ Job() = default;
+ Job(QuitWorker q, size_t sqn) : Sequenced(sqn), quit(q) {}
+ Job(BatchRequest&& i, size_t sqn)
+ : Sequenced(sqn), batch_request(std::move(i)) {}
+ optional<QuitWorker> quit;
+ optional<BatchRequest> batch_request;
+ };
+
+ /// The finished result of a job.
+ struct Result : Sequenced {
+ Result() = default;
+ Result(optional<Batch>&& b, size_t sqn)
+ : Sequenced(sqn), batch(std::move(b)) {}
+ Result(std::exception_ptr exception, size_t sqn)
+ : Sequenced(sqn), exception(std::move(exception)) {}
+ optional<Batch> batch;
+ std::exception_ptr exception;
+ };
+
+ /// Subclass hook for getting the next batch request. The stateless case will
+ /// ask the sampler for a new batch request (e.g. a vector of indices), while
+ /// the stateful one will simply return the batch size.
+ virtual optional<BatchRequestType> get_batch_request() = 0;
+
+ /// Resets the internal state of the DataLoader, optionally pre-fetching
+ /// new jobs.
+ virtual void reset() {
+ shuttle_.drain();
+ sequence_number_ = 0;
+ sequencer_ = new_sequencer();
+ prefetch();
+ }
+
+ /// Schedules `requested_jobs` many new batches to be fetched. The actual
+ /// number of jobs scheduled may be less if the DataLoader exhausts.
+ void prefetch(size_t requested_jobs) {
+ for (const auto r : c10::irange(requested_jobs)) {
+ (void)r; // Suppress unused variable
+ if (auto batch_request = get_batch_request()) {
+ this->push_job(std::move(*batch_request));
+ } else {
+ break;
+ }
+ }
+ }
+
+ /// Schedules the maximum number of jobs (based on the `max_jobs` option).
+ void prefetch() {
+ prefetch(options_.max_jobs);
+ }
+
+ /// Returns the next batch of data, or an empty `optional` if the DataLoader
+ /// is exhausted. This operation will block until a batch is available if one
+ /// is still expected.
+ optional<BatchType> next() {
+ if (options_.workers > 0) {
+ while (optional<Result> result = this->pop_result()) {
+ if (result->exception) {
+ throw WorkerException(result->exception);
+ } else if (result->batch) {
+ prefetch(1);
+ return std::move(result->batch);
+ }
+ }
+ } else if (auto batch_request = get_batch_request()) {
+ return this->main_thread_dataset_->get_batch(std::move(*batch_request));
+ }
+ return nullopt;
+ }
+
+ /// The function that worker threads run.
+ void worker_thread(Dataset& dataset) {
+ while (true) {
+ auto job = shuttle_.pop_job();
+ if (job.quit) {
+ break;
+ }
+ try {
+ auto batch = dataset.get_batch(std::move(*job.batch_request));
+ shuttle_.push_result({std::move(batch), job.sequence_number});
+ } catch (...) {
+ shuttle_.push_result({std::current_exception(), job.sequence_number});
+ }
+ }
+ }
+
+ /// Convenience method that calls `shuttle_.push_job()` with the next sequence
+ /// number.
+ template <typename T>
+ void push_job(T value) {
+ shuttle_.push_job({std::move(value), sequence_number_++});
+ }
+
+ /// Convenience method that gets the next result from the sequencer.
+ optional<Result> pop_result() {
+ return sequencer_->next(
+ [this] { return this->shuttle_.pop_result(this->options_.timeout); });
+ }
+
+ /// Convenience method that creates a new sequencer based on the
+ /// `enforce_ordering` option.
+ std::unique_ptr<detail::sequencers::Sequencer<Result>> new_sequencer() {
+ if (options_.enforce_ordering) {
+ return std::make_unique<detail::sequencers::OrderedSequencer<Result>>(
+ options_.max_jobs);
+ }
+ return std::make_unique<detail::sequencers::NoSequencer<Result>>();
+ }
+
+ /// The options the DataLoader was configured with.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ const FullDataLoaderOptions options_;
+
+ /// The dataset for the main thread, only has a value if the number of
+ /// worker threads was configured as zero, meaning the main thread has to do
+ /// all the work (synchronously). NOTE: Really want this to be on the heap
+ /// when empty, therefore `unique_ptr` and not `optional`.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ std::unique_ptr<Dataset> main_thread_dataset_;
+
+ /// The sequence number for the *next* batch to be retrieved from the
+ /// dataset.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ size_t sequence_number_ = 0;
+
+ /// The worker threads, running the `worker_thread()` method.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ std::vector<std::thread> workers_;
+
+ /// The `DataShuttle` which takes care of the life cycle of a job.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ detail::DataShuttle<Job, Result> shuttle_;
+
+ /// The `Sequencer`, which handles optional ordering of batches.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ std::unique_ptr<detail::sequencers::Sequencer<Result>> sequencer_;
+
+ /// True if the DataLoader has joined its worker threads.
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ bool joined_ = false;
+ };
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateful.h ADDED
@@ -0,0 +1,65 @@
+ #pragma once
+
+ #include <c10/util/irange.h>
+ #include <torch/data/dataloader/base.h>
+
+ #include <cstddef>
+ #include <thread>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+
+ /// A dataloader for stateful datasets.
+ ///
+ /// A dataloader for stateful datatasets differs from one for stateless
+ /// datasets one in that the dataset is shared among worker threads, and that
+ /// this dataset is itself responsible for producing batches rather than
+ /// depending on a sampler. The statefulness here actually refers to the
+ /// dataset. The StatefulDataLoader simply alters the data loading algorithm to
+ /// accommodate the stateful, shared nature of the dataset. Note that the
+ /// dataset must be thread safe if more than one worker thread is used.
+ ///
+ /// A stateful dataloader is created by calling `make_data_loader` with a
+ /// stateful dataset.
+ template <typename Dataset>
+ class StatefulDataLoader : public DataLoaderBase<
+ Dataset,
+ typename Dataset::BatchType::value_type,
+ typename Dataset::BatchRequestType> {
+ public:
+ using super = DataLoaderBase<
+ Dataset,
+ typename Dataset::BatchType::value_type,
+ typename Dataset::BatchRequestType>;
+ using typename super::BatchRequestType;
+
+ /// Constructs the `StatefulDataLoader` from a `dataset` and some `options`.
+ StatefulDataLoader(Dataset dataset, DataLoaderOptions options)
+ : super(
+ std::move(options),
+ std::make_unique<Dataset>(std::move(dataset))) {
+ for (const auto w : c10::irange(this->options_.workers)) {
+ // As opposed to the stateless case, here all worker threads access the
+ // same underlying dataset.
+ this->workers_.emplace_back(
+ [this] { this->worker_thread(*this->main_thread_dataset_); });
+ }
+ }
+
+ private:
+ /// Resets the internal state of the dataloader and the dataset.
+ void reset() override {
+ this->main_thread_dataset_->reset();
+ // Call the base class method last because it calls `prefetch()`
+ super::reset();
+ }
+
+ /// For stateful datasets, the batch request is always the batch size. The
+ /// dataset is responsible for determining what goes into the batch next.
+ optional<BatchRequestType> get_batch_request() override {
+ return this->options_.batch_size;
+ }
+ };
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateless.h ADDED
@@ -0,0 +1,82 @@
+ #pragma once
+
+ #include <torch/data/dataloader/base.h>
+ #include <torch/data/worker_exception.h>
+
+ #include <c10/util/Exception.h>
+ #include <c10/util/irange.h>
+
+ #include <cstddef>
+ #include <thread>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+
+ /// A dataloader for stateless datasets.
+ ///
+ /// This dataloader follows the traditional PyTorch dataloader design, whereby a
+ /// (posssibly) stateful sampler produces *batch requests* for a stateless
+ /// dataset, which acts as a simple batch request to batch mapping. The batch
+ /// request will often be an array of indices, and if the dataset is a simple
+ /// image dataset, the dataset would produce the images at those indices.
+ template <typename Dataset, typename Sampler>
+ class StatelessDataLoader : public DataLoaderBase<
+ Dataset,
+ typename Dataset::BatchType,
+ typename Sampler::BatchRequestType> {
+ public:
+ using super = DataLoaderBase<
+ Dataset,
+ typename Dataset::BatchType,
+ typename Sampler::BatchRequestType>;
+ using typename super::BatchRequestType;
+
+ /// Constructs the `StatelessDataLoader` from a `dataset`, a `sampler` and
+ /// some `options`.
+ StatelessDataLoader(
+ Dataset dataset,
+ Sampler sampler,
+ DataLoaderOptions options)
+ : super(std::move(options)), sampler_(std::move(sampler)) {
+ for (const auto w : c10::irange(this->options_.workers)) {
+ // Here we copy the dataset into the worker thread closure. Each worker
+ // has its own copy of the dataset. This means the dataset must be
+ // trivially copiable, or else we don't expect more than one worker to
+ // be in use.
+ (void)w; // Suppress unused variable warning
+ this->workers_.emplace_back(
+ [this, dataset]() mutable { this->worker_thread(dataset); });
+ }
+ if (this->options_.workers == 0) {
+ this->main_thread_dataset_ =
+ std::make_unique<Dataset>(std::move(dataset));
+ }
+ }
+
+ private:
+ /// Resets the internal state of the dataloader and the sampler.
+ void reset() override {
+ sampler_.reset();
+ // Call the base class method last because it calls `prefetch()`
+ super::reset();
+ }
+
+ /// Queries the sampler for the next batch request (possibly progressing its
+ /// internal state).
+ optional<BatchRequestType> get_batch_request() override {
+ auto indices = sampler_.next(this->options_.batch_size);
+ if (!indices ||
+ (indices->size() < this->options_.batch_size &&
+ this->options_.drop_last)) {
+ return nullopt;
+ }
+ AT_ASSERT(indices->size() > 0);
+ return indices;
+ }
+
+ /// The `Sampler` used to produce batch requests.
+ Sampler sampler_;
+ };
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets.h ADDED
@@ -0,0 +1,9 @@
+ #pragma once
+
+ #include <torch/data/datasets/base.h>
+ #include <torch/data/datasets/chunk.h>
+ #include <torch/data/datasets/map.h>
+ #include <torch/data/datasets/mnist.h>
+ #include <torch/data/datasets/shared.h>
+ #include <torch/data/datasets/stateful.h>
+ #include <torch/data/datasets/tensor.h>
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/map.h ADDED
@@ -0,0 +1,118 @@
+ #pragma once
+
+ #include <torch/data/datasets/base.h>
+ #include <torch/types.h>
+
+ #include <c10/util/ArrayRef.h>
+
+ #include <cstddef>
+ #include <type_traits>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+ namespace datasets {
+ namespace detail {
+ template <bool C, typename T>
+ using optional_if_t = typename std::conditional<C, torch::optional<T>, T>::type;
+ } // namespace detail
+
+ /// A `MapDataset` is a dataset that applies a transform to a source dataset.
+ template <typename SourceDataset, typename AppliedTransform>
+ class MapDataset : public BatchDataset<
+ MapDataset<SourceDataset, AppliedTransform>,
+ detail::optional_if_t<
+ SourceDataset::is_stateful,
+ typename AppliedTransform::OutputBatchType>,
+ typename SourceDataset::BatchRequestType> {
+ public:
+ using DatasetType = SourceDataset;
+ using TransformType = AppliedTransform;
+ using BatchRequestType = typename SourceDataset::BatchRequestType;
+ using OutputBatchType = detail::optional_if_t<
+ SourceDataset::is_stateful,
+ typename AppliedTransform::OutputBatchType>;
+
+ MapDataset(DatasetType dataset, TransformType transform)
+ : dataset_(std::move(dataset)), transform_(std::move(transform)) {}
+
+ /// Gets a batch from the source dataset and applies the transform to it,
+ /// returning the result.
+ OutputBatchType get_batch(BatchRequestType indices) override {
+ return get_batch_impl(std::move(indices));
+ }
+
+ /// Returns the size of the source dataset.
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+ optional<size_t> size() const noexcept override {
+ return dataset_.size();
+ }
+
+ /// Calls `reset()` on the underlying dataset.
+ /// NOTE: Stateless datasets do not have a reset() method, so a call to this
+ /// method will only compile for stateful datasets (which have a reset()
+ /// method).
+ void reset() {
+ dataset_.reset();
+ }
+
+ /// Returns the underlying dataset.
+ const SourceDataset& dataset() noexcept {
+ return dataset_;
+ }
+
+ /// Returns the transform being applied.
+ const AppliedTransform& transform() noexcept {
+ return transform_;
+ }
+
+ private:
+ /// The implementation of `get_batch()` for the stateless case, which simply
+ /// applies the transform to the output of `get_batch()` from the dataset.
+ template <
+ typename D = SourceDataset,
+ typename = torch::disable_if_t<D::is_stateful>>
+ OutputBatchType get_batch_impl(BatchRequestType indices) {
+ return transform_.apply_batch(dataset_.get_batch(std::move(indices)));
+ }
+
+ /// The implementation of `get_batch()` for the stateful case. Here, we follow
+ /// the semantics of `Optional.map()` in many functional languages, which
+ /// applies a transformation to the optional's content when the optional
+ /// contains a value, and returns a new optional (of a different type) if the
+ /// original optional returned by `get_batch()` was empty.
+ template <typename D = SourceDataset>
+ torch::enable_if_t<D::is_stateful, OutputBatchType> get_batch_impl(
+ BatchRequestType indices) {
+ if (auto batch = dataset_.get_batch(std::move(indices))) {
+ return transform_.apply_batch(std::move(*batch));
+ }
+ return nullopt;
+ }
+
+ /// The underlying dataset being transformed.
+ SourceDataset dataset_;
+
+ // The transformation that is applied to batches received from the dataset.
+ AppliedTransform transform_;
+ };
+
+ /// Creates a `MapDataset` with the given dataset and transform.
+ template <typename DatasetType, typename TransformType>
+ MapDataset<DatasetType, TransformType> map(
+ DatasetType dataset,
+ TransformType transform) {
+ static_assert(
+ std::is_same<
+ typename std::conditional<
+ DatasetType::is_stateful,
+ typename DatasetType::BatchType::value_type,
+ typename DatasetType::BatchType>::type,
+ typename TransformType::InputBatchType>::value,
+ "BatchType type of dataset does not match input type of transform");
+ return {std::move(dataset), std::move(transform)};
+ }
+
+ } // namespace datasets
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/mnist.h ADDED
@@ -0,0 +1,48 @@
+ #pragma once
+
+ #include <torch/data/datasets/base.h>
+ #include <torch/data/example.h>
+ #include <torch/types.h>
+
+ #include <torch/csrc/Export.h>
+
+ #include <cstddef>
+ #include <string>
+
+ namespace torch {
+ namespace data {
+ namespace datasets {
+ /// The MNIST dataset.
+ class TORCH_API MNIST : public Dataset<MNIST> {
+ public:
+ /// The mode in which the dataset is loaded.
+ enum class Mode { kTrain, kTest };
+
+ /// Loads the MNIST dataset from the `root` path.
+ ///
+ /// The supplied `root` path should contain the *content* of the unzipped
+ /// MNIST dataset, available from http://yann.lecun.com/exdb/mnist.
+ explicit MNIST(const std::string& root, Mode mode = Mode::kTrain);
+
+ /// Returns the `Example` at the given `index`.
+ Example<> get(size_t index) override;
+
+ /// Returns the size of the dataset.
+ optional<size_t> size() const override;
+
+ /// Returns true if this is the training subset of MNIST.
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+ bool is_train() const noexcept;
+
+ /// Returns all images stacked into a single tensor.
+ const Tensor& images() const;
+
+ /// Returns all targets stacked into a single tensor.
+ const Tensor& targets() const;
+
+ private:
+ Tensor images_, targets_;
+ };
+ } // namespace datasets
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/shared.h ADDED
@@ -0,0 +1,83 @@
+ #pragma once
+
+ #include <torch/data/datasets/base.h>
+
+ #include <memory>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+ namespace datasets {
+
+ /// A dataset that wraps another dataset in a shared pointer and implements the
+ /// `BatchDataset` API, delegating all calls to the shared instance. This is
+ /// useful when you want all worker threads in the dataloader to access the same
+ /// dataset instance. The dataset must take care of synchronization and
+ /// thread-safe access itself.
+ ///
+ /// Use `torch::data::datasets::make_shared_dataset()` to create a new
+ /// `SharedBatchDataset` like you would a `std::shared_ptr`.
+ template <typename UnderlyingDataset>
+ class SharedBatchDataset : public BatchDataset<
+ SharedBatchDataset<UnderlyingDataset>,
+ typename UnderlyingDataset::BatchType,
+ typename UnderlyingDataset::BatchRequestType> {
+ public:
+ using BatchType = typename UnderlyingDataset::BatchType;
+ using BatchRequestType = typename UnderlyingDataset::BatchRequestType;
+
+ /// Constructs a new `SharedBatchDataset` from a `shared_ptr` to the
+ /// `UnderlyingDataset`.
+ /* implicit */ SharedBatchDataset(
+ std::shared_ptr<UnderlyingDataset> shared_dataset)
+ : dataset_(std::move(shared_dataset)) {}
+
+ /// Calls `get_batch` on the underlying dataset.
+ BatchType get_batch(BatchRequestType request) override {
+ return dataset_->get_batch(std::move(request));
+ }
+
+ /// Returns the `size` from the underlying dataset.
+ optional<size_t> size() const override {
+ return dataset_->size();
+ }
+
+ /// Accesses the underlying dataset.
+ UnderlyingDataset& operator*() {
+ return *dataset_;
+ }
+
+ /// Accesses the underlying dataset.
+ const UnderlyingDataset& operator*() const {
+ return *dataset_;
+ }
+
+ /// Accesses the underlying dataset.
+ UnderlyingDataset* operator->() {
+ return dataset_.get();
+ }
+
+ /// Accesses the underlying dataset.
+ const UnderlyingDataset* operator->() const {
+ return dataset_.get();
+ }
+
+ /// Calls `reset()` on the underlying dataset.
+ void reset() {
+ dataset_->reset();
+ }
+
+ private:
+ std::shared_ptr<UnderlyingDataset> dataset_;
+ };
+
+ /// Constructs a new `SharedBatchDataset` by creating a
+ /// `shared_ptr<UnderlyingDatase>`. All arguments are forwarded to
+ /// `make_shared<UnderlyingDataset>`.
+ template <typename UnderlyingDataset, typename... Args>
+ SharedBatchDataset<UnderlyingDataset> make_shared_dataset(Args&&... args) {
+ return std::make_shared<UnderlyingDataset>(std::forward<Args>(args)...);
+ }
+ } // namespace datasets
+ } // namespace data
+ } // namespace torch
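A sketch of the intended usage, assuming a hypothetical `MyThreadSafeDataset` type that guards its own internal state (the type name and its constructor arguments are illustrative, not part of this commit):

void shared_dataset_example() {
  // All dataloader workers will operate on this one shared instance.
  auto dataset =
      torch::data::datasets::make_shared_dataset<MyThreadSafeDataset>(
          "/path/to/data");
  auto loader = torch::data::make_data_loader(
      dataset, torch::data::DataLoaderOptions().workers(4));
}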
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/stateful.h ADDED
@@ -0,0 +1,70 @@
+ #pragma once
+
+ #include <torch/data/datasets/base.h>
+ #include <torch/data/example.h>
+
+ #include <cstddef>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace data {
+ namespace datasets {
+
+ /// A stateful dataset is a dataset that maintains some internal state, which
+ /// will be `reset()` at the beginning of each epoch. Subclasses can override
+ /// the `reset()` method to configure this behavior. Further, the return type of
+ /// a stateful dataset's `get_batch()` method is always an `optional`. When the
+ /// stateful dataset wants to indicate to the dataloader that its epoch has
+ /// ended, it should return an empty optional. The dataloader knows to modify
+ /// its implementation based on whether the dataset is stateless or stateful.
+ ///
+ /// Note that when subclassing a from `StatefulDataset<Self, T>`, the return
+ /// type of `get_batch()`, which the subclass must override, will be
+ /// `optional<T>` (i.e. the type specified in the `StatefulDataset`
+ /// specialization is automatically boxed into an `optional` for the dataset's
+ /// `BatchType`).
+ template <
+ typename Self,
+ typename Batch = std::vector<Example<>>,
+ typename BatchRequest = size_t>
+ class StatefulDataset
+ : public BatchDataset<Self, optional<Batch>, BatchRequest> {
+ public:
+ /// Resets internal state of the dataset.
+ virtual void reset() = 0;
+
+ /// Saves the statefulDataset's state to OutputArchive.
+ virtual void save(serialize::OutputArchive& archive) const = 0;
+
+ /// Deserializes the statefulDataset's state from the `archive`.
+ virtual void load(serialize::InputArchive& archive) = 0;
+ };
+
+ /// Serializes a statefulDataset to `OutputArchive`.
+ template <typename... Args>
+ serialize::OutputArchive& operator<<(
+ serialize::OutputArchive& archive,
+ const StatefulDataset<Args...>& statefulDataset) {
+ statefulDataset.save(archive);
+ return archive;
+ }
+
+ /// Deserializes a statefulDataset from an `InputArchive`.
+ template <typename... Args>
+ serialize::InputArchive& operator>>(
+ serialize::InputArchive& archive,
+ StatefulDataset<Args...>& statefulDataset) {
+ statefulDataset.load(archive);
+ return archive;
+ }
+
+ } // namespace datasets
+ } // namespace data
+ } // namespace torch
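A minimal sketch of a subclass, assuming a hypothetical `CountingDataset` that emits `batch_size` synthetic examples per call until an epoch size is reached (all names and values invented for illustration):

struct CountingDataset
    : torch::data::datasets::StatefulDataset<CountingDataset> {
  explicit CountingDataset(size_t epoch_size) : epoch_size_(epoch_size) {}

  // Returning an empty optional signals the end of the epoch to the dataloader.
  torch::optional<std::vector<torch::data::Example<>>> get_batch(
      size_t batch_size) override {
    if (emitted_ >= epoch_size_) {
      return torch::nullopt;
    }
    std::vector<torch::data::Example<>> batch;
    for (size_t i = 0; i < batch_size && emitted_ < epoch_size_;
         ++i, ++emitted_) {
      batch.push_back({torch::full({1}, static_cast<int64_t>(emitted_)),
                       torch::zeros({1})});
    }
    return batch;
  }

  torch::optional<size_t> size() const override { return epoch_size_; }
  void reset() override { emitted_ = 0; }
  void save(torch::serialize::OutputArchive&) const override {}
  void load(torch::serialize::InputArchive&) override {}

  size_t epoch_size_;
  size_t emitted_ = 0;
};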
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/base.h ADDED
@@ -0,0 +1,47 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/types.h>
+
+ #include <cstddef>
+ #include <mutex>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace data {
+ namespace samplers {
+ /// A `Sampler` is an object that yields an index with which to access a
+ /// dataset.
+ template <typename BatchRequest = std::vector<size_t>>
+ class Sampler {
+ public:
+ using BatchRequestType = BatchRequest;
+
+ virtual ~Sampler() = default;
+
+ /// Resets the `Sampler`'s internal state.
+ /// Typically called before a new epoch.
+ /// Optionally, accepts a new size when reseting the sampler.
+ virtual void reset(optional<size_t> new_size) = 0;
+
+ /// Returns the next index if possible, or an empty optional if the
+ /// sampler is exhausted for this epoch.
+ virtual optional<BatchRequest> next(size_t batch_size) = 0;
+
+ /// Serializes the `Sampler` to the `archive`.
+ virtual void save(serialize::OutputArchive& archive) const = 0;
+
+ /// Deserializes the `Sampler` from the `archive`.
+ virtual void load(serialize::InputArchive& archive) = 0;
+ };
+
+ } // namespace samplers
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/custom_batch_request.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <cstddef>
+
+ namespace torch {
+ namespace data {
+ namespace samplers {
+ /// A base class for custom index types.
+ struct TORCH_API CustomBatchRequest {
+ CustomBatchRequest() = default;
+ CustomBatchRequest(const CustomBatchRequest&) = default;
+ CustomBatchRequest(CustomBatchRequest&&) noexcept = default;
+ virtual ~CustomBatchRequest() = default;
+
+ /// The number of elements accessed by this index.
+ virtual size_t size() const = 0;
+ };
+ } // namespace samplers
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/distributed.h ADDED
@@ -0,0 +1,139 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/data/samplers/base.h>
+
+ #include <cstddef>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace data {
+ namespace samplers {
+
+ /// A `Sampler` that selects a subset of indices to sample from and defines a
+ /// sampling behavior. In a distributed setting, this selects a subset of the
+ /// indices depending on the provided num_replicas and rank parameters. The
+ /// `Sampler` performs a rounding operation based on the `allow_duplicates`
+ /// parameter to decide the local sample count.
+ template <typename BatchRequest = std::vector<size_t>>
+ class DistributedSampler : public Sampler<BatchRequest> {
+ public:
+ DistributedSampler(
+ size_t size,
+ size_t num_replicas = 1,
+ size_t rank = 0,
+ bool allow_duplicates = true)
+ : size_(size),
+ num_replicas_(num_replicas),
+ rank_(rank),
+ epoch_(0),
+ allow_duplicates_(allow_duplicates) {}
+
+ /// Set the epoch for the current enumeration. This can be used to alter the
+ /// sample selection and shuffling behavior.
+ void set_epoch(size_t epoch) {
+ epoch_ = epoch;
+ }
+
+ size_t epoch() const {
+ return epoch_;
+ }
+
+ protected:
+ size_t local_sample_count() {
+ if (allow_duplicates_) {
+ return (size_ + num_replicas_ - 1) / num_replicas_;
+ } else {
+ return size_ / num_replicas_;
+ }
+ }
+
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ size_t size_;
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ size_t num_replicas_;
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ size_t rank_;
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ size_t epoch_;
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+ bool allow_duplicates_;
+ };
+
+ /// Select samples randomly. The sampling order is shuffled at each `reset()`
+ /// call.
+ class TORCH_API DistributedRandomSampler : public DistributedSampler<> {
+ public:
+ DistributedRandomSampler(
+ size_t size,
+ size_t num_replicas = 1,
+ size_t rank = 0,
+ bool allow_duplicates = true);
+
+ /// Resets the `DistributedRandomSampler` to a new set of indices.
+ void reset(optional<size_t> new_size = nullopt) override;
+
+ /// Returns the next batch of indices.
+ optional<std::vector<size_t>> next(size_t batch_size) override;
+
+ /// Serializes the `DistributedRandomSampler` to the `archive`.
+ void save(serialize::OutputArchive& archive) const override;
+
+ /// Deserializes the `DistributedRandomSampler` from the `archive`.
+ void load(serialize::InputArchive& archive) override;
+
+ /// Returns the current index of the `DistributedRandomSampler`.
+ size_t index() const noexcept;
+
+ private:
+ void populate_indices();
+
+ size_t begin_index_;
+ size_t end_index_;
+ size_t sample_index_;
+ std::vector<size_t> all_indices_;
+ };
+
+ /// Select samples sequentially.
+ class TORCH_API DistributedSequentialSampler : public DistributedSampler<> {
+ public:
+ DistributedSequentialSampler(
+ size_t size,
+ size_t num_replicas = 1,
+ size_t rank = 0,
+ bool allow_duplicates = true);
+
+ /// Resets the `DistributedSequentialSampler` to a new set of indices.
+ void reset(optional<size_t> new_size = nullopt) override;
+
+ /// Returns the next batch of indices.
+ optional<std::vector<size_t>> next(size_t batch_size) override;
+
+ /// Serializes the `DistributedSequentialSampler` to the `archive`.
+ void save(serialize::OutputArchive& archive) const override;
+
+ /// Deserializes the `DistributedSequentialSampler` from the `archive`.
+ void load(serialize::InputArchive& archive) override;
+
+ /// Returns the current index of the `DistributedSequentialSampler`.
+ size_t index() const noexcept;
+
+ private:
+ void populate_indices();
+
+ size_t begin_index_;
+ size_t end_index_;
+ size_t sample_index_;
+ std::vector<size_t> all_indices_;
+ };
+
+ } // namespace samplers
+ } // namespace data
+ } // namespace torch
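A sketch of per-rank usage (the `dataset`, `dataset_size`, `world_size`, `rank` and `epoch` variables are placeholders that would normally come from the surrounding training setup):

void distributed_loader_example() {
  // One sampler per process; each rank draws its own shard of the indices.
  torch::data::samplers::DistributedRandomSampler sampler(
      /*size=*/dataset_size, /*num_replicas=*/world_size, /*rank=*/rank);
  sampler.set_epoch(epoch);  // changes shuffling on the next reset()
  auto loader = torch::data::make_data_loader(
      std::move(dataset), sampler,
      torch::data::DataLoaderOptions().batch_size(32));
}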
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/random.h ADDED
@@ -0,0 +1,54 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/data/samplers/base.h>
+ #include <torch/types.h>
+
+ #include <cstddef>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace data {
+ namespace samplers {
+
+ /// A `Sampler` that returns random indices.
+ class TORCH_API RandomSampler : public Sampler<> {
+ public:
+ /// Constructs a `RandomSampler` with a size and dtype for the stored indices.
+ ///
+ /// The constructor will eagerly allocate all required indices, which is the
+ /// sequence `0 ... size - 1`. `index_dtype` is the data type of the stored
+ /// indices. You can change it to influence memory usage.
+ explicit RandomSampler(int64_t size, Dtype index_dtype = torch::kInt64);
+
+ ~RandomSampler() override;
+
+ /// Resets the `RandomSampler` to a new set of indices.
+ void reset(optional<size_t> new_size = nullopt) override;
+
+ /// Returns the next batch of indices.
+ optional<std::vector<size_t>> next(size_t batch_size) override;
+
+ /// Serializes the `RandomSampler` to the `archive`.
+ void save(serialize::OutputArchive& archive) const override;
+
+ /// Deserializes the `RandomSampler` from the `archive`.
+ void load(serialize::InputArchive& archive) override;
+
+ /// Returns the current index of the `RandomSampler`.
+ size_t index() const noexcept;
+
+ private:
+ at::Tensor indices_;
+ int64_t index_ = 0;
+ };
+ } // namespace samplers
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/sequential.h ADDED
@@ -0,0 +1,50 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/data/samplers/base.h>
+ #include <torch/types.h>
+
+ #include <cstddef>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace data {
+ namespace samplers {
+
+ /// A `Sampler` that returns indices sequentially.
+ class TORCH_API SequentialSampler : public Sampler<> {
+ public:
+ /// Creates a `SequentialSampler` that will return indices in the range
+ /// `0...size - 1`.
+ explicit SequentialSampler(size_t size);
+
+ /// Resets the `SequentialSampler` to zero.
+ void reset(optional<size_t> new_size = nullopt) override;
+
+ /// Returns the next batch of indices.
+ optional<std::vector<size_t>> next(size_t batch_size) override;
+
+ /// Serializes the `SequentialSampler` to the `archive`.
+ void save(serialize::OutputArchive& archive) const override;
+
+ /// Deserializes the `SequentialSampler` from the `archive`.
+ void load(serialize::InputArchive& archive) override;
+
+ /// Returns the current index of the `SequentialSampler`.
+ size_t index() const noexcept;
+
+ private:
+ size_t size_;
+ size_t index_{0};
+ };
+
+ } // namespace samplers
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/serialize.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ #include <torch/data/samplers/base.h>
+ #include <torch/serialize/archive.h>
+
+ namespace torch {
+ namespace data {
+ namespace samplers {
+ /// Serializes a `Sampler` into an `OutputArchive`.
+ template <typename BatchRequest>
+ serialize::OutputArchive& operator<<(
+ serialize::OutputArchive& archive,
+ const Sampler<BatchRequest>& sampler) {
+ sampler.save(archive);
+ return archive;
+ }
+
+ /// Deserializes a `Sampler` from an `InputArchive`.
+ template <typename BatchRequest>
+ serialize::InputArchive& operator>>(
+ serialize::InputArchive& archive,
+ Sampler<BatchRequest>& sampler) {
+ sampler.load(archive);
+ return archive;
+ }
+ } // namespace samplers
+ } // namespace data
+ } // namespace torch
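These operators make checkpointing a sampler's position a couple of lines; a small sketch (the sampler size and file name are illustrative):

void sampler_checkpoint_example() {
  torch::data::samplers::RandomSampler sampler(/*size=*/1000);

  torch::serialize::OutputArchive out;
  out << sampler;              // uses the operator<< defined above
  out.save_to("sampler.pt");

  torch::serialize::InputArchive in;
  in.load_from("sampler.pt");
  in >> sampler;               // restores the sampler's state
}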
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/stream.h ADDED
@@ -0,0 +1,63 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/data/samplers/base.h>
+ #include <torch/data/samplers/custom_batch_request.h>
+ #include <torch/types.h>
+
+ #include <cstddef>
+
+ namespace torch {
+ namespace serialize {
+ class InputArchive;
+ class OutputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace data {
+ namespace samplers {
+
+ /// A wrapper around a batch size value, which implements the
+ /// `CustomBatchRequest` interface.
+ struct TORCH_API BatchSize : public CustomBatchRequest {
+ explicit BatchSize(size_t size);
+ size_t size() const noexcept override;
+ operator size_t() const noexcept;
+ size_t size_;
+ };
+
+ /// A sampler for (potentially infinite) streams of data.
+ ///
+ /// The major feature of the `StreamSampler` is that it does not return
+ /// particular indices, but instead only the number of elements to fetch from
+ /// the dataset. The dataset has to decide how to produce those elements.
+ class TORCH_API StreamSampler : public Sampler<BatchSize> {
+ public:
+ /// Constructs the `StreamSampler` with the number of individual examples that
+ /// should be fetched until the sampler is exhausted.
+ explicit StreamSampler(size_t epoch_size);
+
+ /// Resets the internal state of the sampler.
+ void reset(optional<size_t> new_size = nullopt) override;
+
+ /// Returns a `BatchSize` object with the number of elements to fetch in the
+ /// next batch. This number is the minimum of the supplied `batch_size` and
+ /// the difference between the `epoch_size` and the current index. If the
+ /// `epoch_size` has been reached, returns an empty optional.
+ optional<BatchSize> next(size_t batch_size) override;
+
+ /// Serializes the `StreamSampler` to the `archive`.
+ void save(serialize::OutputArchive& archive) const override;
+
+ /// Deserializes the `StreamSampler` from the `archive`.
+ void load(serialize::InputArchive& archive) override;
+
+ private:
+ size_t examples_retrieved_so_far_ = 0;
+ size_t epoch_size_;
+ };
+
+ } // namespace samplers
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/stack.h ADDED
@@ -0,0 +1,49 @@
+ #pragma once
+
+ #include <torch/data/example.h>
+ #include <torch/data/transforms/collate.h>
+ #include <torch/types.h>
+
+ #include <utility>
+ #include <vector>
+
+ namespace torch {
+ namespace data {
+ namespace transforms {
+
+ template <typename T = Example<>>
+ struct Stack;
+
+ /// A `Collation` for `Example<Tensor, Tensor>` types that stacks all data
+ /// tensors into one tensor, and all target (label) tensors into one tensor.
+ template <>
+ struct Stack<Example<>> : public Collation<Example<>> {
+ Example<> apply_batch(std::vector<Example<>> examples) override {
+ std::vector<torch::Tensor> data, targets;
+ data.reserve(examples.size());
+ targets.reserve(examples.size());
+ for (auto& example : examples) {
+ data.push_back(std::move(example.data));
+ targets.push_back(std::move(example.target));
+ }
+ return {torch::stack(data), torch::stack(targets)};
+ }
+ };
+
+ /// A `Collation` for `Example<Tensor, NoTarget>` types that stacks all data
+ /// tensors into one tensor.
+ template <>
+ struct Stack<TensorExample>
+ : public Collation<Example<Tensor, example::NoTarget>> {
+ TensorExample apply_batch(std::vector<TensorExample> examples) override {
+ std::vector<torch::Tensor> data;
+ data.reserve(examples.size());
+ for (auto& example : examples) {
+ data.push_back(std::move(example.data));
+ }
+ return torch::stack(data);
+ }
+ };
+ } // namespace transforms
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/nn/module.h>
4
+ #include <torch/types.h>
5
+ #include <torch/utils.h>
6
+
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Exception.h>
9
+
10
+ #include <memory>
11
+ #include <utility>
12
+
13
+ namespace torch {
14
+ namespace nn {
15
+ /// The `clone()` method in the base `Module` class does not have knowledge of
16
+ /// the concrete runtime type of its subclasses. Therefore, `clone()` must
17
+ /// either be called from within the subclass, or from a base class that has
18
+ /// knowledge of the concrete type. `Cloneable` uses the CRTP to gain
19
+ /// knowledge of the subclass' static type and provide an implementation of the
20
+ /// `clone()` method. We do not want to use this pattern in the base class,
21
+ /// because then storing a module would always require templatizing it.
22
+ template <typename Derived>
23
+ // NOLINTNEXTLINE(bugprone-exception-escape)
24
+ class Cloneable : public Module {
25
+ public:
26
+ using Module::Module;
27
+
28
+ /// `reset()` must perform initialization of all members with reference
29
+ /// semantics, most importantly parameters, buffers and submodules.
30
+ virtual void reset() = 0;
31
+
32
+ /// Performs a recursive "deep copy" of the `Module`, such that all parameters
33
+ /// and submodules in the cloned module are different from those in the
34
+ /// original module.
35
+ std::shared_ptr<Module> clone(
36
+ const optional<Device>& device = nullopt) const override {
37
+ NoGradGuard no_grad;
38
+
39
+ const auto& self = static_cast<const Derived&>(*this);
40
+ auto copy = std::make_shared<Derived>(self);
41
+ copy->parameters_.clear();
42
+ copy->buffers_.clear();
43
+ copy->children_.clear();
44
+ copy->reset();
45
+ TORCH_CHECK(
46
+ copy->parameters_.size() == parameters_.size(),
47
+ "The cloned module does not have the same number of "
48
+ "parameters as the original module after calling reset(). "
49
+ "Are you sure you called register_parameter() inside reset() "
50
+ "and not the constructor?");
51
+ for (const auto& parameter : named_parameters(/*recurse=*/false)) {
52
+ auto& tensor = *parameter;
53
+ auto data = device && tensor.device() != *device
54
+ ? tensor.to(*device)
55
+ : autograd::Variable(tensor).clone();
56
+ copy->parameters_[parameter.key()].set_data(data);
57
+ }
58
+ TORCH_CHECK(
59
+ copy->buffers_.size() == buffers_.size(),
60
+ "The cloned module does not have the same number of "
61
+ "buffers as the original module after calling reset(). "
62
+ "Are you sure you called register_buffer() inside reset() "
63
+ "and not the constructor?");
64
+ for (const auto& buffer : named_buffers(/*recurse=*/false)) {
65
+ auto& tensor = *buffer;
66
+ auto data = device && tensor.device() != *device
67
+ ? tensor.to(*device)
68
+ : autograd::Variable(tensor).clone();
69
+ copy->buffers_[buffer.key()].set_data(data);
70
+ }
71
+ TORCH_CHECK(
72
+ copy->children_.size() == children_.size(),
73
+ "The cloned module does not have the same number of "
74
+ "child modules as the original module after calling reset(). "
75
+ "Are you sure you called register_module() inside reset() "
76
+ "and not the constructor?");
77
+ for (const auto& child : children_) {
78
+ copy->children_[child.key()]->clone_(*child.value(), device);
79
+ }
80
+ return copy;
81
+ }
82
+
83
+ private:
84
+ void clone_(Module& other, const optional<Device>& device) final {
85
+ // Here we are *pretty* certain that `other's` type is `Derived` (because it
86
+ // was registered under the same name as `this`), but you never know what
87
+ // crazy things `reset()` does, so `dynamic_cast` just to be safe.
88
+ auto clone = std::dynamic_pointer_cast<Derived>(other.clone(device));
89
+ TORCH_CHECK(
90
+ clone != nullptr,
91
+ "Attempted to clone submodule, but it is of a "
92
+ "different type than the submodule it was to be cloned into");
93
+ static_cast<Derived&>(*this) = *clone;
94
+ }
95
+ };
96
+
97
+ } // namespace nn
98
+ } // namespace torch
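
As the comments above describe, a user-defined module typically derives from `Cloneable<Impl>` and registers all parameters, buffers, and submodules inside `reset()`, so that `clone()` can re-run that initialization on the copy before overwriting its tensors. A minimal sketch (the module name and sizes are made up for illustration):

```
#include <torch/torch.h>

// Hypothetical module: every tensor with reference semantics is created in
// reset(), not in the constructor, so clone() can call reset() on the copy.
struct MyAffineImpl : torch::nn::Cloneable<MyAffineImpl> {
  explicit MyAffineImpl(int64_t in = 4, int64_t out = 2) : in_(in), out_(out) {
    reset();
  }

  void reset() override {
    weight = register_parameter("weight", torch::randn({out_, in_}));
    bias = register_parameter("bias", torch::zeros(out_));
  }

  torch::Tensor forward(const torch::Tensor& x) {
    return torch::addmm(bias, x, weight.t());
  }

  int64_t in_, out_;
  torch::Tensor weight, bias;
};
TORCH_MODULE(MyAffine);

// MyAffine m;
// auto copy = m->clone();  // deep copy: parameters are distinct tensors
```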
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/functional/batchnorm.h>
4
+ #include <torch/nn/functional/conv.h>
5
+ #include <torch/nn/functional/distance.h>
6
+ #include <torch/nn/functional/dropout.h>
7
+ #include <torch/nn/functional/embedding.h>
8
+ #include <torch/nn/functional/fold.h>
9
+ #include <torch/nn/functional/instancenorm.h>
10
+ #include <torch/nn/functional/linear.h>
11
+ #include <torch/nn/functional/loss.h>
12
+ #include <torch/nn/functional/normalization.h>
13
+ #include <torch/nn/functional/padding.h>
14
+ #include <torch/nn/functional/pixelshuffle.h>
15
+ #include <torch/nn/functional/pooling.h>
16
+ #include <torch/nn/functional/upsampling.h>
17
+ #include <torch/nn/functional/vision.h>
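
This umbrella header (pulled in by `<torch/torch.h>`) is what the `namespace F = torch::nn::functional;` alias used throughout the doc examples below refers to; a minimal sketch:

```
#include <torch/torch.h>

namespace F = torch::nn::functional;
// e.g. F::relu(x), F::softmax(x, F::SoftmaxFuncOptions(1)), ...
```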
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h ADDED
@@ -0,0 +1,966 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Dispatch.h>
4
+ #include <torch/nn/functional/dropout.h>
5
+ #include <torch/nn/functional/linear.h>
6
+ #include <torch/nn/options/activation.h>
7
+ #include <torch/nn/options/dropout.h>
8
+ #include <torch/nn/options/linear.h>
9
+ #include <torch/types.h>
10
+ #include <limits>
11
+ #include <utility>
12
+
13
+ namespace torch {
14
+ namespace nn {
15
+ namespace functional {
16
+
17
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
18
+ namespace detail {
19
+ inline Tensor elu(Tensor input, double alpha, bool inplace) {
20
+ if (inplace) {
21
+ return torch::elu_(input, alpha);
22
+ } else {
23
+ return torch::elu(input, alpha);
24
+ }
25
+ }
26
+ } // namespace detail
27
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
28
+
29
+ /// See
30
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.elu
31
+ /// about the exact behavior of this functional.
32
+ ///
33
+ /// See the documentation for `torch::nn::functional::ELUFuncOptions` class to
34
+ /// learn what optional arguments are supported for this functional.
35
+ ///
36
+ /// Example:
37
+ /// ```
38
+ /// namespace F = torch::nn::functional;
39
+ /// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
40
+ /// ```
41
+ inline Tensor elu(Tensor input, const ELUFuncOptions& options = {}) {
42
+ return detail::elu(std::move(input), options.alpha(), options.inplace());
43
+ }
44
+
45
+ // ============================================================================
46
+
47
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
48
+ namespace detail {
49
+ inline Tensor selu(Tensor input, bool inplace) {
50
+ if (inplace) {
51
+ return torch::selu_(input);
52
+ } else {
53
+ return torch::selu(input);
54
+ }
55
+ }
56
+ } // namespace detail
57
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
58
+
59
+ /// See
60
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.selu
61
+ /// about the exact behavior of this functional.
62
+ ///
63
+ /// See the documentation for `torch::nn::functional::SELUFuncOptions` class to
64
+ /// learn what optional arguments are supported for this functional.
65
+ ///
66
+ /// Example:
67
+ /// ```
68
+ /// namespace F = torch::nn::functional;
69
+ /// F::selu(input, F::SELUFuncOptions(false));
70
+ /// ```
71
+ inline Tensor selu(Tensor input, const SELUFuncOptions& options = {}) {
72
+ return detail::selu(std::move(input), options.inplace());
73
+ }
74
+
75
+ // ============================================================================
76
+
77
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
78
+ namespace detail {
79
+ inline Tensor hardshrink(const Tensor& input, double lambda) {
80
+ return torch::hardshrink(input, lambda);
81
+ }
82
+ } // namespace detail
83
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
84
+
85
+ /// See
86
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardshrink
87
+ /// about the exact behavior of this functional.
88
+ ///
89
+ /// See the documentation for `torch::nn::functional::HardshrinkFuncOptions`
90
+ /// class to learn what optional arguments are supported for this functional.
91
+ ///
92
+ /// Example:
93
+ /// ```
94
+ /// namespace F = torch::nn::functional;
95
+ /// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
96
+ /// ```
97
+ inline Tensor hardshrink(
98
+ const Tensor& input,
99
+ const HardshrinkFuncOptions& options = {}) {
100
+ return detail::hardshrink(input, options.lambda());
101
+ }
102
+
103
+ // ============================================================================
104
+
105
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
106
+ namespace detail {
107
+ inline Tensor hardtanh(
108
+ Tensor input,
109
+ double min_val,
110
+ double max_val,
111
+ bool inplace) {
112
+ if (inplace) {
113
+ return torch::hardtanh_(input, min_val, max_val);
114
+ } else {
115
+ return torch::hardtanh(input, min_val, max_val);
116
+ }
117
+ }
118
+ } // namespace detail
119
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
120
+
121
+ /// See
122
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardtanh
123
+ /// about the exact behavior of this functional.
124
+ ///
125
+ /// See the documentation for `torch::nn::functional::HardtanhFuncOptions` class
126
+ /// to learn what optional arguments are supported for this functional.
127
+ ///
128
+ /// Example:
129
+ /// ```
130
+ /// namespace F = torch::nn::functional;
131
+ /// F::hardtanh(x,
132
+ /// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
133
+ /// ```
134
+ inline Tensor hardtanh(Tensor input, const HardtanhFuncOptions& options = {}) {
135
+ return detail::hardtanh(
136
+ std::move(input),
137
+ options.min_val(),
138
+ options.max_val(),
139
+ options.inplace());
140
+ }
141
+
142
+ // ============================================================================
143
+
144
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
145
+ namespace detail {
146
+ inline Tensor leaky_relu(Tensor input, double negative_slope, bool inplace) {
147
+ if (inplace) {
148
+ return torch::leaky_relu_(input, negative_slope);
149
+ } else {
150
+ return torch::leaky_relu(input, negative_slope);
151
+ }
152
+ }
153
+ } // namespace detail
154
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
155
+
156
+ /// See
157
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.leaky_relu
158
+ /// about the exact behavior of this functional.
159
+ ///
160
+ /// See the documentation for `torch::nn::functional::LeakyReLUFuncOptions`
161
+ /// class to learn what optional arguments are supported for this functional.
162
+ ///
163
+ /// Example:
164
+ /// ```
165
+ /// namespace F = torch::nn::functional;
166
+ /// F::leaky_relu(x,
167
+ /// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
168
+ /// ```
169
+ inline Tensor leaky_relu(
170
+ Tensor input,
171
+ const LeakyReLUFuncOptions& options = {}) {
172
+ return detail::leaky_relu(
173
+ std::move(input), options.negative_slope(), options.inplace());
174
+ }
175
+
176
+ // ============================================================================
177
+
178
+ inline Tensor logsigmoid(const Tensor& input) {
179
+ return torch::log_sigmoid(input);
180
+ }
181
+
182
+ // ============================================================================
183
+
184
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
185
+ namespace detail {
186
+ inline Tensor gumbel_softmax(
187
+ const Tensor& logits,
188
+ double tau,
189
+ bool hard,
190
+ int dim) {
191
+ auto gumbels =
192
+ -torch::empty_like(logits).exponential_().log(); // ~Gumbel(0,1)
193
+ gumbels = (logits + gumbels) / tau; // ~Gumbel(logits, tau)
194
+ auto y_soft = gumbels.softmax(dim);
195
+
196
+ torch::Tensor ret;
197
+ if (hard) {
198
+ // Straight through.
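+ // Straight-through estimator: the expression below returns the one-hot
+ // y_hard in the forward pass, while y_hard - y_soft.detach() + y_soft
+ // routes gradients through the differentiable y_soft.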
199
+ auto index = std::get<1>(y_soft.max(dim, /*keepdim=*/true));
200
+ auto y_hard = torch::zeros_like(logits).scatter_(dim, index, 1.0);
201
+ ret = y_hard - y_soft.detach() + y_soft;
202
+ } else {
203
+ ret = y_soft;
204
+ }
205
+ return ret;
206
+ }
207
+ } // namespace detail
208
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
209
+
210
+ /// See
211
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.gumbel_softmax
212
+ /// about the exact behavior of this functional.
213
+ ///
214
+ /// See the documentation for `torch::nn::functional::GumbelSoftmaxFuncOptions`
215
+ /// class to learn what optional arguments are supported for this functional.
216
+ ///
217
+ /// Example:
218
+ /// ```
219
+ /// namespace F = torch::nn::functional;
220
+ /// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1));
221
+ /// ```
222
+ inline Tensor gumbel_softmax(
223
+ const Tensor& logits,
224
+ const GumbelSoftmaxFuncOptions& options = {}) {
225
+ return detail::gumbel_softmax(
226
+ logits, options.tau(), options.hard(), options.dim());
227
+ }
228
+
229
+ // ============================================================================
230
+
231
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
232
+ namespace detail {
233
+ inline Tensor softmax(
234
+ const Tensor& input,
235
+ int64_t dim,
236
+ c10::optional<torch::Dtype> dtype) {
237
+ Tensor ret;
238
+
239
+ if (dtype == c10::nullopt) {
240
+ ret = input.softmax(dim);
241
+ } else {
242
+ ret = input.softmax(dim, dtype);
243
+ }
244
+
245
+ return ret;
246
+ }
247
+ } // namespace detail
248
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
249
+
250
+ /// See
251
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmax
252
+ /// about the exact behavior of this functional.
253
+ ///
254
+ /// See the documentation for `torch::nn::functional::SoftmaxFuncOptions` class
255
+ /// to learn what optional arguments are supported for this functional.
256
+ ///
257
+ /// Example:
258
+ /// ```
259
+ /// namespace F = torch::nn::functional;
260
+ /// F::softmax(input, F::SoftmaxFuncOptions(1));
261
+ /// ```
262
+ inline Tensor softmax(const Tensor& input, const SoftmaxFuncOptions& options) {
263
+ return detail::softmax(input, options.dim(), options.dtype());
264
+ }
265
+
266
+ // ============================================================================
267
+
268
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
269
+ namespace detail {
270
+ inline Tensor softmin(
271
+ const Tensor& input,
272
+ int64_t dim,
273
+ c10::optional<torch::Dtype> dtype) {
274
+ Tensor ret;
275
+
276
+ if (dtype == c10::nullopt) {
277
+ ret = (-input).softmax(dim);
278
+ } else {
279
+ ret = (-input).softmax(dim, dtype);
280
+ }
281
+
282
+ return ret;
283
+ }
284
+ } // namespace detail
285
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
286
+
287
+ /// See
288
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmin
289
+ /// about the exact behavior of this functional.
290
+ ///
291
+ /// See the documentation for `torch::nn::functional::SoftminFuncOptions` class
292
+ /// to learn what optional arguments are supported for this functional.
293
+ ///
294
+ /// Example:
295
+ /// ```
296
+ /// namespace F = torch::nn::functional;
297
+ /// F::softmin(input, F::SoftminFuncOptions(1));
298
+ /// ```
299
+ inline Tensor softmin(const Tensor& input, const SoftminFuncOptions& options) {
300
+ return detail::softmin(input, options.dim(), options.dtype());
301
+ }
302
+
303
+ // ============================================================================
304
+
305
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
306
+ namespace detail {
307
+ inline Tensor log_softmax(
308
+ const Tensor& input,
309
+ int64_t dim,
310
+ c10::optional<torch::Dtype> dtype) {
311
+ Tensor ret;
312
+
313
+ if (dtype == c10::nullopt) {
314
+ ret = input.log_softmax(dim);
315
+ } else {
316
+ ret = input.log_softmax(dim, dtype);
317
+ }
318
+
319
+ return ret;
320
+ }
321
+ } // namespace detail
322
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
323
+
324
+ /// See
325
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.log_softmax
326
+ /// about the exact behavior of this functional.
327
+ ///
328
+ /// See the documentation for `torch::nn::functional::LogSoftmaxFuncOptions`
329
+ /// class to learn what optional arguments are supported for this functional.
330
+ ///
331
+ /// Example:
332
+ /// ```
333
+ /// namespace F = torch::nn::functional;
334
+ /// F::log_softmax(input, F::LogSoftmaxFuncOptions(1));
335
+ /// ```
336
+ inline Tensor log_softmax(
337
+ const Tensor& input,
338
+ const LogSoftmaxFuncOptions& options) {
339
+ return detail::log_softmax(input, options.dim(), options.dtype());
340
+ }
341
+
342
+ // ============================================================================
343
+
344
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
345
+ namespace detail {
346
+ inline Tensor glu(const Tensor& input, int64_t dim) {
347
+ TORCH_CHECK(
348
+ input.dim() != 0,
349
+ "glu does not support scalars because halving size must be even");
350
+ return torch::glu(input, dim);
351
+ }
352
+ } // namespace detail
353
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
354
+
355
+ /// See
356
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.glu
357
+ /// about the exact behavior of this functional.
358
+ ///
359
+ /// See the documentation for `torch::nn::functional::GLUFuncOptions` class to
360
+ /// learn what optional arguments are supported for this functional.
361
+ ///
362
+ /// Example:
363
+ /// ```
364
+ /// namespace F = torch::nn::functional;
365
+ /// F::glu(input, F::GLUFuncOptions(1));
366
+ /// ```
367
+ inline Tensor glu(const Tensor& input, const GLUFuncOptions& options = {}) {
368
+ return detail::glu(input, options.dim());
369
+ }
370
+
371
+ // ============================================================================
372
+
373
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
374
+ namespace detail {
375
+ inline Tensor gelu(const Tensor& input, string approximate) {
376
+ return torch::gelu(input, approximate);
377
+ }
378
+ } // namespace detail
379
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
380
+
381
+ inline Tensor gelu(const Tensor& input, const GELUFuncOptions& options = {}) {
382
+ return detail::gelu(input, options.approximate());
383
+ }
384
+
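
`gelu` above has no usage comment, unlike its neighbours; a brief sketch with a placeholder tensor `x` (assuming `approximate` accepts the same `"none"`/`"tanh"` strings as the Python API):

```
namespace F = torch::nn::functional;
auto y_exact = F::gelu(x);                                             // exact GELU
auto y_tanh = F::gelu(x, F::GELUFuncOptions().approximate("tanh"));    // tanh approximation
```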
385
+ // ============================================================================
386
+
387
+ inline Tensor silu(const Tensor& input) {
388
+ return torch::silu(input);
389
+ }
390
+
391
+ // ============================================================================
392
+
393
+ inline Tensor mish(const Tensor& input) {
394
+ return torch::mish(input);
395
+ }
396
+
397
+ // ============================================================================
398
+
399
+ inline Tensor prelu(const Tensor& input, const Tensor& weight) {
400
+ return torch::prelu(input, weight);
401
+ }
402
+
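
`silu`, `mish`, and `prelu` above take no options struct; a minimal usage sketch (`x` and `weight` are placeholder tensors):

```
namespace F = torch::nn::functional;
auto a = F::silu(x);           // x * sigmoid(x)
auto b = F::mish(x);           // x * tanh(softplus(x))
auto c = F::prelu(x, weight);  // weight holds the learnable negative slopes
```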
403
+ // ============================================================================
404
+
405
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
406
+ namespace detail {
407
+ inline Tensor relu(Tensor input, bool inplace) {
408
+ if (inplace) {
409
+ return torch::relu_(input);
410
+ } else {
411
+ return torch::relu(input);
412
+ }
413
+ }
414
+ } // namespace detail
415
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
416
+
417
+ /// See
418
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu
419
+ /// about the exact behavior of this functional.
420
+ ///
421
+ /// See the documentation for `torch::nn::functional::ReLUFuncOptions` class to
422
+ /// learn what optional arguments are supported for this functional.
423
+ ///
424
+ /// Example:
425
+ /// ```
426
+ /// namespace F = torch::nn::functional;
427
+ /// F::relu(x, F::ReLUFuncOptions().inplace(true));
428
+ /// ```
429
+ inline Tensor relu(Tensor input, const ReLUFuncOptions& options = {}) {
430
+ return detail::relu(std::move(input), options.inplace());
431
+ }
432
+
433
+ // ============================================================================
434
+
435
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
436
+ namespace detail {
437
+ inline Tensor relu6(Tensor input, bool inplace) {
438
+ if (inplace) {
439
+ return torch::relu6_(input);
440
+ } else {
441
+ return torch::relu6(input);
442
+ }
443
+ }
444
+ } // namespace detail
445
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
446
+
447
+ /// See
448
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu6
449
+ /// about the exact behavior of this functional.
450
+ ///
451
+ /// See the documentation for `torch::nn::functional::ReLU6FuncOptions` class to
452
+ /// learn what optional arguments are supported for this functional.
453
+ ///
454
+ /// Example:
455
+ /// ```
456
+ /// namespace F = torch::nn::functional;
457
+ /// F::relu6(x, F::ReLU6FuncOptions().inplace(true));
458
+ /// ```
459
+ inline Tensor relu6(Tensor input, const ReLU6FuncOptions& options = {}) {
460
+ return detail::relu6(std::move(input), options.inplace());
461
+ }
462
+
463
+ // ============================================================================
464
+
465
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
466
+ namespace detail {
467
+ inline Tensor rrelu(
468
+ Tensor input,
469
+ double lower,
470
+ double upper,
471
+ bool training,
472
+ bool inplace) {
473
+ if (inplace) {
474
+ return torch::rrelu_(input, lower, upper, training);
475
+ } else {
476
+ return torch::rrelu(input, lower, upper, training);
477
+ }
478
+ }
479
+ } // namespace detail
480
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
481
+
482
+ /// See
483
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.rrelu
484
+ /// about the exact behavior of this functional.
485
+ ///
486
+ /// See the documentation for `torch::nn::functional::RReLUFuncOptions` class to
487
+ /// learn what optional arguments are supported for this functional.
488
+ ///
489
+ /// Example:
490
+ /// ```
491
+ /// namespace F = torch::nn::functional;
492
+ /// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true));
493
+ /// ```
494
+ inline Tensor rrelu(Tensor input, const RReLUFuncOptions& options = {}) {
495
+ return detail::rrelu(
496
+ std::move(input),
497
+ options.lower(),
498
+ options.upper(),
499
+ options.training(),
500
+ options.inplace());
501
+ }
502
+
503
+ // ============================================================================
504
+
505
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
506
+ namespace detail {
507
+ inline Tensor celu(Tensor input, double alpha, bool inplace) {
508
+ if (inplace) {
509
+ return torch::celu_(input, alpha);
510
+ } else {
511
+ return torch::celu(input, alpha);
512
+ }
513
+ }
514
+ } // namespace detail
515
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
516
+
517
+ /// See
518
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.celu
519
+ /// about the exact behavior of this functional.
520
+ ///
521
+ /// See the documentation for `torch::nn::functional::CELUFuncOptions` class to
522
+ /// learn what optional arguments are supported for this functional.
523
+ ///
524
+ /// Example:
525
+ /// ```
526
+ /// namespace F = torch::nn::functional;
527
+ /// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
528
+ /// ```
529
+ inline Tensor celu(Tensor input, const CELUFuncOptions& options = {}) {
530
+ return detail::celu(std::move(input), options.alpha(), options.inplace());
531
+ }
532
+
533
+ // ============================================================================
534
+
535
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
536
+ namespace detail {
537
+ inline Tensor softplus(const Tensor& input, double beta, double threshold) {
538
+ return torch::softplus(input, beta, threshold);
539
+ }
540
+ } // namespace detail
541
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
542
+
543
+ /// See
544
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softplus
545
+ /// about the exact behavior of this functional.
546
+ ///
547
+ /// See the documentation for `torch::nn::functional::SoftplusFuncOptions` class
548
+ /// to learn what optional arguments are supported for this functional.
549
+ ///
550
+ /// Example:
551
+ /// ```
552
+ /// namespace F = torch::nn::functional;
553
+ /// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
554
+ /// ```
555
+ inline Tensor softplus(
556
+ const Tensor& input,
557
+ const SoftplusFuncOptions& options = {}) {
558
+ return detail::softplus(input, options.beta(), options.threshold());
559
+ }
560
+
561
+ // ============================================================================
562
+
563
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
564
+ namespace detail {
565
+ inline Tensor softshrink(const Tensor& input, double lambda) {
566
+ return torch::softshrink(input, lambda);
567
+ }
568
+ } // namespace detail
569
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
570
+
571
+ /// See
572
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softshrink
573
+ /// about the exact behavior of this functional.
574
+ ///
575
+ /// See the documentation for `torch::nn::functional::SoftshrinkFuncOptions`
576
+ /// class to learn what optional arguments are supported for this functional.
577
+ ///
578
+ /// Example:
579
+ /// ```
580
+ /// namespace F = torch::nn::functional;
581
+ /// F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
582
+ /// ```
583
+ inline Tensor softshrink(
584
+ const Tensor& input,
585
+ const SoftshrinkFuncOptions& options = {}) {
586
+ return detail::softshrink(input, options.lambda());
587
+ }
588
+
589
+ // ============================================================================
590
+
591
+ inline Tensor softsign(const Tensor& input) {
592
+ return input / (input.abs() + 1);
593
+ }
594
+
595
+ // ============================================================================
596
+
597
+ inline Tensor tanhshrink(const Tensor& input) {
598
+ return input - input.tanh();
599
+ }
600
+
601
+ // ============================================================================
602
+
603
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
604
+ namespace detail {
605
+ inline Tensor threshold(
606
+ Tensor input,
607
+ double threshold,
608
+ double value,
609
+ bool inplace) {
610
+ if (inplace) {
611
+ return torch::threshold_(input, threshold, value);
612
+ } else {
613
+ return torch::threshold(input, threshold, value);
614
+ }
615
+ }
616
+ } // namespace detail
617
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
618
+
619
+ /// See
620
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.threshold
621
+ /// about the exact behavior of this functional.
622
+ ///
623
+ /// See the documentation for `torch::nn::functional::ThresholdFuncOptions`
624
+ /// class to learn what optional arguments are supported for this functional.
625
+ ///
626
+ /// Example:
627
+ /// ```
628
+ /// namespace F = torch::nn::functional;
629
+ /// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
630
+ /// ```
631
+ inline Tensor threshold(Tensor input, const ThresholdFuncOptions& options) {
632
+ return detail::threshold(
633
+ std::move(input),
634
+ options.threshold(),
635
+ options.value(),
636
+ options.inplace());
637
+ }
638
+
639
+ // ============================================================================
640
+
641
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
642
+ namespace detail {
643
+ inline std::tuple<Tensor, Tensor> multi_head_attention_forward(
644
+ const Tensor& query,
645
+ const Tensor& key,
646
+ const Tensor& value,
647
+ int64_t embed_dim_to_check,
648
+ int64_t num_heads,
649
+ const Tensor& in_proj_weight,
650
+ const Tensor& in_proj_bias,
651
+ const Tensor& bias_k,
652
+ const Tensor& bias_v,
653
+ bool add_zero_attn,
654
+ double dropout_p,
655
+ const Tensor& out_proj_weight,
656
+ const Tensor& out_proj_bias,
657
+ bool training = true,
658
+ const Tensor& key_padding_mask = {},
659
+ bool need_weights = true,
660
+ const Tensor& attn_mask = {},
661
+ bool use_separate_proj_weight = false,
662
+ const Tensor& q_proj_weight = {},
663
+ const Tensor& k_proj_weight = {},
664
+ const Tensor& v_proj_weight = {},
665
+ const Tensor& static_k = {},
666
+ const Tensor& static_v = {},
667
+ bool average_attn_weights = true) {
668
+ namespace F = torch::nn::functional;
669
+
670
+ const auto query_sizes = query.sizes();
671
+ const auto& tgt_len = query_sizes[0];
672
+ const auto& bsz = query_sizes[1];
673
+ const auto& embed_dim = query_sizes[2];
674
+ TORCH_INTERNAL_ASSERT(embed_dim == embed_dim_to_check);
675
+ TORCH_INTERNAL_ASSERT(key.sizes() == value.sizes());
676
+
677
+ const auto head_dim = embed_dim / num_heads;
678
+ TORCH_CHECK(
679
+ head_dim * num_heads == embed_dim,
680
+ "embed_dim must be divisible by num_heads");
681
+ const auto scaling = 1 / std::sqrt(head_dim);
682
+
683
+ Tensor q, k, v;
684
+ if (!use_separate_proj_weight) {
685
+ if (torch::equal(query, key) && torch::equal(key, value)) {
686
+ // self-attention
687
+ const auto chunks =
688
+ F::linear(query, in_proj_weight, in_proj_bias).chunk(3, /*dim=*/-1);
689
+ q = chunks[0];
690
+ k = chunks[1];
691
+ v = chunks[2];
692
+ } else if (torch::equal(key, value)) {
693
+ // encoder-decoder attention
694
+ // This is the inline in_proj function, using in_proj_weight and in_proj_bias
695
+ auto _b = in_proj_bias;
696
+ auto _start = 0;
697
+ auto _end = embed_dim;
698
+ auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end);
699
+ if (_b.defined()) {
700
+ _b = _b.slice(/*dim=*/0, _start, _end);
701
+ }
702
+ q = F::linear(query, _w, _b);
703
+
704
+ if (!key.defined()) {
705
+ TORCH_INTERNAL_ASSERT(!value.defined());
706
+ k.reset();
707
+ v.reset();
708
+ } else {
709
+ // This is the inline in_proj function, using in_proj_weight and in_proj_bias
710
+ _b = in_proj_bias;
711
+ _start = embed_dim;
712
+ _w = in_proj_weight.slice(/*dim=*/0, _start);
713
+ if (_b.defined()) {
714
+ _b = _b.slice(/*dim=*/0, _start);
715
+ }
716
+ const auto chunks = F::linear(key, _w, _b).chunk(2, /*dim=*/-1);
717
+ k = chunks[0];
718
+ v = chunks[1];
719
+ }
720
+ } else {
721
+ // This is the inline in_proj function, using in_proj_weight and in_proj_bias
722
+ auto _b = in_proj_bias;
723
+ auto _start = 0;
724
+ auto _end = embed_dim;
725
+ auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end);
726
+ if (_b.defined()) {
727
+ _b = _b.slice(/*dim=*/0, _start, _end);
728
+ }
729
+ q = F::linear(query, _w, _b);
730
+
731
+ // This is the inline in_proj function, using in_proj_weight and in_proj_bias
732
+ _b = in_proj_bias;
733
+ _start = embed_dim;
734
+ _end = embed_dim * 2;
735
+ _w = in_proj_weight.slice(/*dim=*/0, _start, _end);
736
+ if (_b.defined()) {
737
+ _b = _b.slice(/*dim=*/0, _start, _end);
738
+ }
739
+ k = F::linear(key, _w, _b);
740
+
741
+ // This is the inline in_proj function, using in_proj_weight and in_proj_bias
742
+ _b = in_proj_bias;
743
+ _start = embed_dim * 2;
744
+ _w = in_proj_weight.slice(/*dim=*/0, _start);
745
+ if (_b.defined()) {
746
+ _b = _b.slice(0, _start);
747
+ }
748
+ v = F::linear(value, _w, _b);
749
+ }
750
+ } else {
751
+ const auto& q_proj_weight_non_opt = q_proj_weight;
752
+ {
753
+ const auto sizes = q_proj_weight_non_opt.sizes();
754
+ const auto len1 = sizes[0];
755
+ const auto len2 = sizes[1];
756
+ TORCH_CHECK(len1 == embed_dim && len2 == query.size(-1));
757
+ }
758
+
759
+ const auto& k_proj_weight_non_opt = k_proj_weight;
760
+ {
761
+ const auto sizes = k_proj_weight_non_opt.sizes();
762
+ const auto len1 = sizes[0];
763
+ const auto len2 = sizes[1];
764
+ TORCH_CHECK(len1 == embed_dim && len2 == key.size(-1));
765
+ }
766
+
767
+ const auto& v_proj_weight_non_opt = v_proj_weight;
768
+ {
769
+ const auto sizes = v_proj_weight_non_opt.sizes();
770
+ const auto len1 = sizes[0];
771
+ const auto len2 = sizes[1];
772
+ TORCH_CHECK(len1 == embed_dim && len2 == value.size(-1));
773
+ }
774
+
775
+ if (in_proj_bias.defined()) {
776
+ q = F::linear(
777
+ query,
778
+ q_proj_weight_non_opt,
779
+ in_proj_bias.slice(/*dim=*/0, 0, embed_dim));
780
+ k = F::linear(
781
+ key,
782
+ k_proj_weight_non_opt,
783
+ in_proj_bias.slice(/*dim=*/0, embed_dim, (embed_dim * 2)));
784
+ v = F::linear(
785
+ value,
786
+ v_proj_weight_non_opt,
787
+ in_proj_bias.slice(/*dim=*/0, (embed_dim * 2)));
788
+ } else {
789
+ q = F::linear(query, q_proj_weight_non_opt, in_proj_bias);
790
+ k = F::linear(key, k_proj_weight_non_opt, in_proj_bias);
791
+ v = F::linear(value, v_proj_weight_non_opt, in_proj_bias);
792
+ }
793
+ }
794
+ q = q * scaling;
795
+ Tensor attn_mask_ = attn_mask;
796
+ Tensor key_padding_mask_ = key_padding_mask;
797
+ if (bias_k.defined() && bias_v.defined()) {
798
+ if (!static_k.defined() && !static_v.defined()) {
799
+ k = torch::cat({k, bias_k.repeat({1, bsz, 1})});
800
+ v = torch::cat({v, bias_v.repeat({1, bsz, 1})});
801
+ if (attn_mask_.defined()) {
802
+ attn_mask_ = torch::cat(
803
+ {attn_mask_,
804
+ torch::zeros(
805
+ {attn_mask_.size(0), 1},
806
+ at::TensorOptions(attn_mask_.dtype())
807
+ .device(attn_mask_.device()))},
808
+ /*dim=*/1);
809
+ }
810
+ if (key_padding_mask_.defined()) {
811
+ key_padding_mask_ = torch::cat(
812
+ {key_padding_mask_,
813
+ torch::zeros(
814
+ {key_padding_mask_.size(0), 1},
815
+ at::TensorOptions(key_padding_mask_.dtype())
816
+ .device(key_padding_mask_.device()))},
817
+ /*dim=*/1);
818
+ }
819
+ } else {
820
+ TORCH_CHECK(!static_k.defined(), "bias cannot be added to static key.");
821
+ TORCH_CHECK(!static_v.defined(), "bias cannot be added to static value.");
822
+ }
823
+ } else {
824
+ TORCH_CHECK(!bias_k.defined());
825
+ TORCH_CHECK(!bias_v.defined());
826
+ }
827
+ q = q.contiguous().view({tgt_len, bsz * num_heads, head_dim}).transpose(0, 1);
828
+ if (k.defined()) {
829
+ k = k.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1);
830
+ }
831
+ if (v.defined()) {
832
+ v = v.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1);
833
+ }
834
+ if (static_k.defined()) {
835
+ TORCH_CHECK(static_k.size(0) == bsz * num_heads);
836
+ TORCH_CHECK(static_k.size(2) == head_dim);
837
+ k = static_k;
838
+ }
839
+ if (static_v.defined()) {
840
+ TORCH_CHECK(static_v.size(0) == bsz * num_heads);
841
+ TORCH_CHECK(static_v.size(2) == head_dim);
842
+ v = static_v;
843
+ }
844
+ auto src_len = k.size(1);
845
+ if (key_padding_mask_.defined()) {
846
+ TORCH_CHECK(key_padding_mask_.size(0) == bsz);
847
+ TORCH_CHECK(key_padding_mask_.size(1) == src_len);
848
+ }
849
+ if (add_zero_attn) {
850
+ src_len += 1;
851
+ auto k_sizes = k.sizes().vec();
852
+ k_sizes[1] = 1;
853
+ k = torch::cat(
854
+ {k,
855
+ torch::zeros(
856
+ k_sizes, at::TensorOptions(k.dtype()).device(k.device()))},
857
+ /*dim=*/1);
858
+ auto v_sizes = v.sizes().vec();
859
+ v_sizes[1] = 1;
860
+ v = torch::cat(
861
+ {v,
862
+ torch::zeros(
863
+ v_sizes, at::TensorOptions(v.dtype()).device(v.device()))},
864
+ /*dim=*/1);
865
+ if (attn_mask_.defined()) {
866
+ attn_mask_ = torch::cat(
867
+ {attn_mask_,
868
+ torch::zeros(
869
+ {attn_mask_.size(0), 1},
870
+ at::TensorOptions(attn_mask_.dtype())
871
+ .device(attn_mask_.device()))},
872
+ /*dim=*/1);
873
+ }
874
+ if (key_padding_mask_.defined()) {
875
+ key_padding_mask_ = torch::cat(
876
+ {key_padding_mask_,
877
+ torch::zeros(
878
+ {key_padding_mask_.size(0), 1},
879
+ at::TensorOptions(key_padding_mask_.dtype())
880
+ .device(key_padding_mask_.device()))},
881
+ /*dim=*/1);
882
+ }
883
+ }
884
+ auto attn_output_weights = torch::bmm(q, k.transpose(1, 2));
885
+ TORCH_CHECK(
886
+ attn_output_weights.sizes() ==
887
+ IntArrayRef({bsz * num_heads, tgt_len, src_len}));
888
+ if (attn_mask_.defined()) {
889
+ attn_mask_ = attn_mask_.unsqueeze(0);
890
+ attn_output_weights += attn_mask_;
891
+ }
892
+ if (key_padding_mask_.defined()) {
893
+ attn_output_weights =
894
+ attn_output_weights.view({bsz, num_heads, tgt_len, src_len});
895
+ attn_output_weights = AT_DISPATCH_FLOATING_TYPES(
896
+ attn_output_weights.scalar_type(),
897
+ "attn_output_weights.masked_fill",
898
+ [&]() {
899
+ return attn_output_weights.masked_fill(
900
+ key_padding_mask_.unsqueeze(1).unsqueeze(2),
901
+ -std::numeric_limits<scalar_t>::infinity());
902
+ });
903
+ attn_output_weights =
904
+ attn_output_weights.view({bsz * num_heads, tgt_len, src_len});
905
+ }
906
+ // NOLINTNEXTLINE(bugprone-argument-comment)
907
+ attn_output_weights = F::softmax(attn_output_weights, /*dim=*/-1);
908
+ attn_output_weights = F::dropout(
909
+ attn_output_weights,
910
+ F::DropoutFuncOptions().p(dropout_p).training(training));
911
+ auto attn_output = torch::bmm(attn_output_weights, v);
912
+ TORCH_CHECK(
913
+ attn_output.sizes() == IntArrayRef({bsz * num_heads, tgt_len, head_dim}));
914
+ attn_output =
915
+ attn_output.transpose(0, 1).contiguous().view({tgt_len, bsz, embed_dim});
916
+ attn_output = F::linear(attn_output, out_proj_weight, out_proj_bias);
917
+ if (need_weights) {
918
+ attn_output_weights =
919
+ attn_output_weights.view({bsz, num_heads, tgt_len, src_len});
920
+ if (average_attn_weights) {
921
+ // average attention weights over heads
922
+ attn_output_weights = attn_output_weights.sum(/*dim=*/1) / num_heads;
923
+ }
924
+ return std::make_tuple(attn_output, attn_output_weights);
925
+ } else {
926
+ return std::make_tuple(attn_output, Tensor());
927
+ }
928
+ }
929
+ } // namespace detail
930
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
931
+
932
+ inline std::tuple<Tensor, Tensor> multi_head_attention_forward(
933
+ const Tensor& query,
934
+ const Tensor& key,
935
+ const Tensor& value,
936
+ const MultiheadAttentionForwardFuncOptions& options) {
937
+ return detail::multi_head_attention_forward(
938
+ query,
939
+ key,
940
+ value,
941
+ options.embed_dim_to_check(),
942
+ options.num_heads(),
943
+ options.in_proj_weight(),
944
+ options.in_proj_bias(),
945
+ options.bias_k(),
946
+ options.bias_v(),
947
+ options.add_zero_attn(),
948
+ options.dropout_p(),
949
+ options.out_proj_weight(),
950
+ options.out_proj_bias(),
951
+ options.training(),
952
+ options.key_padding_mask(),
953
+ options.need_weights(),
954
+ options.attn_mask(),
955
+ options.use_separate_proj_weight(),
956
+ options.q_proj_weight(),
957
+ options.k_proj_weight(),
958
+ options.v_proj_weight(),
959
+ options.static_k(),
960
+ options.static_v(),
961
+ options.average_attn_weights());
962
+ }
963
+
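
The public `multi_head_attention_forward` wrapper above has no usage comment; a rough sketch with placeholder tensors (`query`, `key`, `value`, projection weights). The constructor argument order for `MultiheadAttentionForwardFuncOptions` is an assumption here and should be checked against `torch/nn/options/activation.h`:

```
namespace F = torch::nn::functional;
// Assumed order: embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
// bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, out_proj_bias.
auto opts = F::MultiheadAttentionForwardFuncOptions(
    /*embed_dim_to_check=*/embed_dim, /*num_heads=*/num_heads,
    in_proj_weight, in_proj_bias, bias_k, bias_v,
    /*add_zero_attn=*/false, /*dropout_p=*/0.0,
    out_proj_weight, out_proj_bias);
auto [attn_output, attn_weights] =
    F::multi_head_attention_forward(query, key, value, opts);
```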
964
+ } // namespace functional
965
+ } // namespace nn
966
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h ADDED
@@ -0,0 +1,83 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/nn/options/batchnorm.h>
5
+ #include <torch/types.h>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+ namespace functional {
10
+
11
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
12
+ namespace detail {
13
+ inline Tensor batch_norm(
14
+ const Tensor& input,
15
+ const Tensor& running_mean,
16
+ const Tensor& running_var,
17
+ Tensor weight,
18
+ Tensor bias,
19
+ bool training,
20
+ c10::optional<double> momentum,
21
+ double eps) {
22
+ TORCH_CHECK(
23
+ input.dim() >= 2,
24
+ "Expected at least 2 input dimensions, but got ",
25
+ input.dim());
26
+ if (training) {
27
+ auto size = input.sizes();
28
+ int64_t size_prods = size[0];
29
+ for (const auto i : c10::irange(size.size() - 2)) {
30
+ size_prods *= size[i + 2];
31
+ }
32
+ TORCH_CHECK(
33
+ size_prods != 1,
34
+ "Expected more than 1 value per channel when training, got input size ",
35
+ size);
36
+ }
37
+
38
+ return torch::batch_norm(
39
+ input,
40
+ weight,
41
+ bias,
42
+ running_mean,
43
+ running_var,
44
+ training,
45
+ momentum.value(),
46
+ eps,
47
+ at::globalContext().userEnabledCuDNN());
48
+ }
49
+ } // namespace detail
50
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
51
+
52
+ /// See
53
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.batch_norm
54
+ /// about the exact behavior of this functional.
55
+ ///
56
+ /// See the documentation for `torch::nn::functional::BatchNormFuncOptions`
57
+ /// class to learn what optional arguments are supported for this functional.
58
+ ///
59
+ /// Example:
60
+ /// ```
61
+ /// namespace F = torch::nn::functional;
62
+ /// F::batch_norm(input, mean, variance,
63
+ /// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false));
64
+ /// ```
65
+ inline Tensor batch_norm(
66
+ const Tensor& input,
67
+ const Tensor& running_mean,
68
+ const Tensor& running_var,
69
+ const BatchNormFuncOptions& options = {}) {
70
+ return detail::batch_norm(
71
+ input,
72
+ running_mean,
73
+ running_var,
74
+ options.weight(),
75
+ options.bias(),
76
+ options.training(),
77
+ options.momentum(),
78
+ options.eps());
79
+ }
80
+
81
+ } // namespace functional
82
+ } // namespace nn
83
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/conv.h ADDED
@@ -0,0 +1,301 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/options/conv.h>
4
+ #include <torch/types.h>
5
+
6
+ namespace torch {
7
+ namespace nn {
8
+ namespace functional {
9
+
10
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
11
+ namespace detail {
12
+
13
+ inline std::string padding_unwrap(enumtype::kValid) {
14
+ return "valid";
15
+ }
16
+
17
+ inline std::string padding_unwrap(enumtype::kSame) {
18
+ return "same";
19
+ }
20
+
21
+ template <size_t D>
22
+ IntArrayRef padding_unwrap(const ExpandingArray<D>& array) {
23
+ return array;
24
+ }
25
+
26
+ inline Tensor conv1d(
27
+ const Tensor& input,
28
+ const Tensor& weight,
29
+ const Tensor& bias,
30
+ ExpandingArray<1> stride,
31
+ const Conv1dFuncOptions::padding_t& padding,
32
+ ExpandingArray<1> dilation,
33
+ int64_t groups) {
34
+ return std::visit(
35
+ [&](const auto& pad) {
36
+ return torch::conv1d(
37
+ input, weight, bias, stride, padding_unwrap(pad), dilation, groups);
38
+ },
39
+ padding);
40
+ }
41
+ } // namespace detail
42
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
43
+
44
+ /// See
45
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv1d
46
+ /// about the exact behavior of this functional.
47
+ ///
48
+ /// See the documentation for `torch::nn::functional::Conv1dFuncOptions` class
49
+ /// to learn what optional arguments are supported for this functional.
50
+ ///
51
+ /// Example:
52
+ /// ```
53
+ /// namespace F = torch::nn::functional;
54
+ /// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
55
+ /// ```
56
+ inline Tensor conv1d(
57
+ const Tensor& input,
58
+ const Tensor& weight,
59
+ const Conv1dFuncOptions& options = {}) {
60
+ return detail::conv1d(
61
+ input,
62
+ weight,
63
+ options.bias(),
64
+ options.stride(),
65
+ options.padding(),
66
+ options.dilation(),
67
+ options.groups());
68
+ }
69
+
70
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
71
+ namespace detail {
72
+ inline Tensor conv2d(
73
+ const Tensor& input,
74
+ const Tensor& weight,
75
+ const Tensor& bias,
76
+ ExpandingArray<2> stride,
77
+ const Conv2dFuncOptions::padding_t& padding,
78
+ ExpandingArray<2> dilation,
79
+ int64_t groups) {
80
+ return std::visit(
81
+ [&](const auto& pad) {
82
+ return torch::conv2d(
83
+ input, weight, bias, stride, padding_unwrap(pad), dilation, groups);
84
+ },
85
+ padding);
86
+ }
87
+ } // namespace detail
88
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
89
+
90
+ /// See
91
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv2d
92
+ /// about the exact behavior of this functional.
93
+ ///
94
+ /// See the documentation for `torch::nn::functional::Conv2dFuncOptions` class
95
+ /// to learn what optional arguments are supported for this functional.
96
+ ///
97
+ /// Example:
98
+ /// ```
99
+ /// namespace F = torch::nn::functional;
100
+ /// F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1));
101
+ /// ```
102
+ inline Tensor conv2d(
103
+ const Tensor& input,
104
+ const Tensor& weight,
105
+ const Conv2dFuncOptions& options = {}) {
106
+ return detail::conv2d(
107
+ input,
108
+ weight,
109
+ options.bias(),
110
+ options.stride(),
111
+ options.padding(),
112
+ options.dilation(),
113
+ options.groups());
114
+ }
115
+
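
Because the `padding` option here is a variant (see `padding_unwrap` above), it accepts either a numeric padding or the `torch::kValid` / `torch::kSame` enums; a brief sketch with placeholder tensors `x` and `weight`:

```
namespace F = torch::nn::functional;
auto y0 = F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1).padding(1));
auto y1 = F::conv2d(x, weight, F::Conv2dFuncOptions().padding(torch::kSame));   // requires stride 1
auto y2 = F::conv2d(x, weight, F::Conv2dFuncOptions().padding(torch::kValid));
```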
116
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
117
+ namespace detail {
118
+ inline Tensor conv3d(
119
+ const Tensor& input,
120
+ const Tensor& weight,
121
+ const Tensor& bias,
122
+ ExpandingArray<3> stride,
123
+ const Conv3dFuncOptions::padding_t& padding,
124
+ ExpandingArray<3> dilation,
125
+ int64_t groups) {
126
+ return std::visit(
127
+ [&](const auto& pad) {
128
+ return torch::conv3d(
129
+ input, weight, bias, stride, padding_unwrap(pad), dilation, groups);
130
+ },
131
+ padding);
132
+ }
133
+ } // namespace detail
134
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
135
+
136
+ /// See
137
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv3d
138
+ /// about the exact behavior of this functional.
139
+ ///
140
+ /// See the documentation for `torch::nn::functional::Conv3dFuncOptions` class
141
+ /// to learn what optional arguments are supported for this functional.
142
+ ///
143
+ /// Example:
144
+ /// ```
145
+ /// namespace F = torch::nn::functional;
146
+ /// F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1));
147
+ /// ```
148
+ inline Tensor conv3d(
149
+ const Tensor& input,
150
+ const Tensor& weight,
151
+ const Conv3dFuncOptions& options = {}) {
152
+ return detail::conv3d(
153
+ input,
154
+ weight,
155
+ options.bias(),
156
+ options.stride(),
157
+ options.padding(),
158
+ options.dilation(),
159
+ options.groups());
160
+ }
161
+
162
+ // ============================================================================
163
+
164
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
165
+ namespace detail {
166
+ inline Tensor conv_transpose1d(
167
+ const Tensor& input,
168
+ const Tensor& weight,
169
+ const Tensor& bias,
170
+ IntArrayRef stride,
171
+ IntArrayRef padding,
172
+ IntArrayRef output_padding,
173
+ int64_t groups,
174
+ IntArrayRef dilation) {
175
+ return torch::conv_transpose1d(
176
+ input, weight, bias, stride, padding, output_padding, groups, dilation);
177
+ }
178
+ } // namespace detail
179
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
180
+
181
+ /// See
182
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose1d
183
+ /// about the exact behavior of this functional.
184
+ ///
185
+ /// See the documentation for
186
+ /// `torch::nn::functional::ConvTranspose1dFuncOptions` class to learn what
187
+ /// optional arguments are supported for this functional.
188
+ ///
189
+ /// Example:
190
+ /// ```
191
+ /// namespace F = torch::nn::functional;
192
+ /// F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1));
193
+ /// ```
194
+ inline Tensor conv_transpose1d(
195
+ const Tensor& input,
196
+ const Tensor& weight,
197
+ const ConvTranspose1dFuncOptions& options = {}) {
198
+ return detail::conv_transpose1d(
199
+ input,
200
+ weight,
201
+ options.bias(),
202
+ options.stride(),
203
+ options.padding(),
204
+ options.output_padding(),
205
+ options.groups(),
206
+ options.dilation());
207
+ }
208
+
209
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
210
+ namespace detail {
211
+ inline Tensor conv_transpose2d(
212
+ const Tensor& input,
213
+ const Tensor& weight,
214
+ const Tensor& bias,
215
+ IntArrayRef stride,
216
+ IntArrayRef padding,
217
+ IntArrayRef output_padding,
218
+ int64_t groups,
219
+ IntArrayRef dilation) {
220
+ return torch::conv_transpose2d(
221
+ input, weight, bias, stride, padding, output_padding, groups, dilation);
222
+ }
223
+ } // namespace detail
224
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
225
+
226
+ /// See
227
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose2d
228
+ /// about the exact behavior of this functional.
229
+ ///
230
+ /// See the documentation for
231
+ /// `torch::nn::functional::ConvTranspose2dFuncOptions` class to learn what
232
+ /// optional arguments are supported for this functional.
233
+ ///
234
+ /// Example:
235
+ /// ```
236
+ /// namespace F = torch::nn::functional;
237
+ /// F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1));
238
+ /// ```
239
+ inline Tensor conv_transpose2d(
240
+ const Tensor& input,
241
+ const Tensor& weight,
242
+ const ConvTranspose2dFuncOptions& options = {}) {
243
+ return detail::conv_transpose2d(
244
+ input,
245
+ weight,
246
+ options.bias(),
247
+ options.stride(),
248
+ options.padding(),
249
+ options.output_padding(),
250
+ options.groups(),
251
+ options.dilation());
252
+ }
253
+
254
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
255
+ namespace detail {
256
+ inline Tensor conv_transpose3d(
257
+ const Tensor& input,
258
+ const Tensor& weight,
259
+ const Tensor& bias,
260
+ IntArrayRef stride,
261
+ IntArrayRef padding,
262
+ IntArrayRef output_padding,
263
+ int64_t groups,
264
+ IntArrayRef dilation) {
265
+ return torch::conv_transpose3d(
266
+ input, weight, bias, stride, padding, output_padding, groups, dilation);
267
+ }
268
+ } // namespace detail
269
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
270
+
271
+ /// See
272
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose3d
273
+ /// about the exact behavior of this functional.
274
+ ///
275
+ /// See the documentation for
276
+ /// `torch::nn::functional::ConvTranspose3dFuncOptions` class to learn what
277
+ /// optional arguments are supported for this functional.
278
+ ///
279
+ /// Example:
280
+ /// ```
281
+ /// namespace F = torch::nn::functional;
282
+ /// F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1));
283
+ /// ```
284
+ inline Tensor conv_transpose3d(
285
+ const Tensor& input,
286
+ const Tensor& weight,
287
+ const ConvTranspose3dFuncOptions& options = {}) {
288
+ return detail::conv_transpose3d(
289
+ input,
290
+ weight,
291
+ options.bias(),
292
+ options.stride(),
293
+ options.padding(),
294
+ options.output_padding(),
295
+ options.groups(),
296
+ options.dilation());
297
+ }
298
+
299
+ } // namespace functional
300
+ } // namespace nn
301
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h ADDED
@@ -0,0 +1,88 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/options/distance.h>
4
+
5
+ namespace torch {
6
+ namespace nn {
7
+ namespace functional {
8
+
9
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
10
+ namespace detail {
11
+ inline Tensor cosine_similarity(
12
+ const Tensor& x1,
13
+ const Tensor& x2,
14
+ int64_t dim,
15
+ double eps) {
16
+ return torch::cosine_similarity(x1, x2, dim, eps);
17
+ }
18
+ } // namespace detail
19
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
20
+
21
+ /// See
22
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_similarity
23
+ /// about the exact behavior of this functional.
24
+ ///
25
+ /// See the documentation for
26
+ /// `torch::nn::functional::CosineSimilarityFuncOptions` class to learn what
27
+ /// optional arguments are supported for this functional.
28
+ ///
29
+ /// Example:
30
+ /// ```
31
+ /// namespace F = torch::nn::functional;
32
+ /// F::cosine_similarity(input1, input2,
33
+ /// F::CosineSimilarityFuncOptions().dim(1));
34
+ /// ```
35
+ inline Tensor cosine_similarity(
36
+ const Tensor& x1,
37
+ const Tensor& x2,
38
+ const CosineSimilarityFuncOptions& options = {}) {
39
+ return detail::cosine_similarity(x1, x2, options.dim(), options.eps());
40
+ }
41
+
42
+ // ============================================================================
43
+
44
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
45
+ namespace detail {
46
+ inline Tensor pairwise_distance(
47
+ const Tensor& x1,
48
+ const Tensor& x2,
49
+ double p,
50
+ double eps,
51
+ bool keepdim) {
52
+ return torch::pairwise_distance(x1, x2, p, eps, keepdim);
53
+ }
54
+ } // namespace detail
55
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
56
+
57
+ /// See
58
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pairwise_distance
59
+ /// about the exact behavior of this functional.
60
+ ///
61
+ /// See the documentation for
62
+ /// `torch::nn::functional::PairwiseDistanceFuncOptions` class to learn what
63
+ /// optional arguments are supported for this functional.
64
+ ///
65
+ /// Example:
66
+ /// ```
67
+ /// namespace F = torch::nn::functional;
68
+ /// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1));
69
+ /// ```
70
+ inline Tensor pairwise_distance(
71
+ const Tensor& x1,
72
+ const Tensor& x2,
73
+ const PairwiseDistanceFuncOptions& options = {}) {
74
+ return detail::pairwise_distance(
75
+ x1, x2, options.p(), options.eps(), options.keepdim());
76
+ }
77
+
78
+ // ============================================================================
79
+
80
+ /// Computes the p-norm distance between every pair of row vectors in the input.
81
+ /// This function will be faster if the rows are contiguous.
82
+ inline Tensor pdist(const Tensor& input, double p = 2.0) {
83
+ return torch::pdist(input, p);
84
+ }
85
+
86
+ } // namespace functional
87
+ } // namespace nn
88
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h ADDED
@@ -0,0 +1,234 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/options/dropout.h>
4
+
5
+ #include <utility>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+ namespace functional {
10
+
11
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
12
+ namespace detail {
13
+
14
+ inline Tensor dropout(Tensor input, double p, bool training, bool inplace) {
15
+ TORCH_CHECK(
16
+ p >= 0. && p <= 1.,
17
+ "dropout probability has to be between 0 and 1, but got ",
18
+ p);
19
+ if (inplace) {
20
+ return torch::dropout_(input, p, training);
21
+ } else {
22
+ return torch::dropout(input, p, training);
23
+ }
24
+ }
25
+
26
+ } // namespace detail
27
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
28
+
29
+ /// See
30
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout
31
+ /// about the exact behavior of this functional.
32
+ ///
33
+ /// See the documentation for `torch::nn::functional::DropoutFuncOptions` class
34
+ /// to learn what optional arguments are supported for this functional.
35
+ ///
36
+ /// Example:
37
+ /// ```
38
+ /// namespace F = torch::nn::functional;
39
+ /// F::dropout(input, F::DropoutFuncOptions().p(0.5));
40
+ /// ```
41
+ inline Tensor dropout(Tensor input, const DropoutFuncOptions& options = {}) {
42
+ return detail::dropout(
43
+ std::move(input), options.p(), options.training(), options.inplace());
44
+ }
45
+
46
+ // ============================================================================
47
+
48
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
49
+ namespace detail {
50
+
51
+ template <int64_t unbatched_dim, int64_t batched_dim>
52
+ inline Tensor _dropoutNd_helper(
53
+ Tensor input,
54
+ double p,
55
+ bool training,
56
+ bool inplace,
57
+ const char* fn_name) {
58
+ TORCH_CHECK(
59
+ p >= 0. && p <= 1.,
60
+ "dropout probability has to be between 0 and 1, but got ",
61
+ p);
62
+
63
+ auto inp_dim = input.dim();
64
+ auto is_batched = inp_dim == batched_dim;
65
+ if (!is_batched) {
66
+ if (inplace) {
67
+ input = input.unsqueeze_(0);
68
+ } else {
69
+ input = input.unsqueeze(0);
70
+ }
71
+ }
72
+
73
+ Tensor result;
74
+ if (inplace) {
75
+ result = torch::feature_dropout_(input, p, training);
76
+ } else {
77
+ result = torch::feature_dropout(input, p, training);
78
+ }
79
+
80
+ if (!is_batched) {
81
+ if (inplace) {
82
+ result = result.squeeze_(0);
83
+ } else {
84
+ result = result.squeeze(0);
85
+ }
86
+ }
87
+ return result;
88
+ }
89
+
90
+ inline Tensor dropout2d(Tensor input, double p, bool training, bool inplace) {
91
+ return _dropoutNd_helper<3, 4>(
92
+ std::move(input), p, training, inplace, "dropout2d");
93
+ }
94
+
95
+ } // namespace detail
96
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
97
+
98
+ /// See
99
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout2d
100
+ /// about the exact behavior of this functional.
101
+ ///
102
+ /// See the documentation for `torch::nn::functional::Dropout2dFuncOptions`
103
+ /// class to learn what optional arguments are supported for this functional.
104
+ ///
105
+ /// Example:
106
+ /// ```
107
+ /// namespace F = torch::nn::functional;
108
+ /// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5));
109
+ /// ```
110
+ inline Tensor dropout2d(
111
+ Tensor input,
112
+ const Dropout2dFuncOptions& options = {}) {
113
+ return detail::dropout2d(
114
+ std::move(input), options.p(), options.training(), options.inplace());
115
+ }
116
+
117
+ // ============================================================================
118
+
119
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
120
+ namespace detail {
121
+
122
+ inline Tensor dropout3d(Tensor input, double p, bool training, bool inplace) {
123
+ return _dropoutNd_helper<4, 5>(
124
+ std::move(input), p, training, inplace, "dropout3d");
125
+ }
126
+
127
+ } // namespace detail
128
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
129
+
130
+ /// See
131
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout3d
132
+ /// about the exact behavior of this functional.
133
+ ///
134
+ /// See the documentation for `torch::nn::functional::Dropout3dFuncOptions`
135
+ /// class to learn what optional arguments are supported for this functional.
136
+ ///
137
+ /// Example:
138
+ /// ```
139
+ /// namespace F = torch::nn::functional;
140
+ /// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5));
141
+ /// ```
142
+ inline Tensor dropout3d(
143
+ Tensor input,
144
+ const Dropout3dFuncOptions& options = {}) {
145
+ return detail::dropout3d(
146
+ std::move(input), options.p(), options.training(), options.inplace());
147
+ }
148
+
149
+ // ============================================================================
150
+
151
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
152
+ namespace detail {
153
+
154
+ inline Tensor alpha_dropout(
155
+ Tensor input,
156
+ double p,
157
+ bool training,
158
+ bool inplace) {
159
+ if (p < 0. || p > 1.) {
160
+ TORCH_CHECK(
161
+ false, "dropout probability has to be between 0 and 1, but got ", p);
162
+ }
163
+ return inplace ? torch::alpha_dropout_(input, p, training)
164
+ : torch::alpha_dropout(input, p, training);
165
+ }
166
+
167
+ } // namespace detail
168
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
169
+
170
+ /// See
171
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.alpha_dropout
172
+ /// about the exact behavior of this functional.
173
+ ///
174
+ /// See the documentation for `torch::nn::functional::AlphaDropoutFuncOptions`
175
+ /// class to learn what optional arguments are supported for this functional.
176
+ ///
177
+ /// Example:
178
+ /// ```
179
+ /// namespace F = torch::nn::functional;
180
+ /// F::alpha_dropout(input,
181
+ /// F::AlphaDropoutFuncOptions().p(0.5).training(false));
182
+ /// ```
183
+ inline Tensor alpha_dropout(
184
+ Tensor input,
185
+ const AlphaDropoutFuncOptions& options = {}) {
186
+ return detail::alpha_dropout(
187
+ std::move(input), options.p(), options.training(), options.inplace());
188
+ }
189
+
190
+ // ============================================================================
191
+
192
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
193
+ namespace detail {
194
+
195
+ inline Tensor feature_alpha_dropout(
196
+ Tensor input,
197
+ double p,
198
+ bool training,
199
+ bool inplace) {
200
+ if (p < 0. || p > 1.) {
201
+ TORCH_CHECK(
202
+ false, "dropout probability has to be between 0 and 1, but got ", p);
203
+ }
204
+ return inplace ? torch::feature_alpha_dropout_(input, p, training)
205
+ : torch::feature_alpha_dropout(input, p, training);
206
+ }
207
+
208
+ } // namespace detail
209
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
210
+
211
+ /// See
212
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.feature_alpha_dropout
213
+ /// about the exact behavior of this functional.
214
+ ///
215
+ /// See the documentation for
216
+ /// `torch::nn::functional::FeatureAlphaDropoutFuncOptions` class to learn what
217
+ /// optional arguments are supported for this functional.
218
+ ///
219
+ /// Example:
220
+ /// ```
221
+ /// namespace F = torch::nn::functional;
222
+ /// F::feature_alpha_dropout(input,
223
+ /// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false));
224
+ /// ```
225
+ inline Tensor feature_alpha_dropout(
226
+ Tensor input,
227
+ const FeatureAlphaDropoutFuncOptions& options = {}) {
228
+ return detail::feature_alpha_dropout(
229
+ std::move(input), options.p(), options.training(), options.inplace());
230
+ }
231
+
232
+ } // namespace functional
233
+ } // namespace nn
234
+ } // namespace torch
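A minimal usage sketch of the dropout functionals declared above; shapes and probabilities are illustrative only:

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  // Element-wise dropout: each value is zeroed with probability p in training.
  auto x = torch::randn({16, 32});
  auto y = F::dropout(x, F::DropoutFuncOptions().p(0.3).training(true));

  // Channel-wise (feature) dropout on a batched (N, C, H, W) tensor.
  auto img = torch::randn({4, 3, 8, 8});
  auto z = F::dropout2d(img, F::Dropout2dFuncOptions().p(0.5));

  // Alpha dropout preserves the self-normalizing property of SELU networks.
  auto a = F::alpha_dropout(x, F::AlphaDropoutFuncOptions().p(0.2).training(true));
}
```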
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h ADDED
@@ -0,0 +1,211 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/options/embedding.h>
4
+
5
+ namespace torch {
6
+ namespace nn {
7
+ namespace functional {
8
+
9
+ inline Tensor one_hot(const Tensor& tensor, int64_t num_classes = -1) {
10
+ return torch::one_hot(tensor, num_classes);
11
+ }
12
+
13
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
14
+ namespace detail {
15
+ inline void _no_grad_embedding_renorm_(
16
+ Tensor weight,
17
+ const Tensor& input,
18
+ float max_norm,
19
+ float norm_type) {
20
+ torch::NoGradGuard no_grad;
21
+ torch::embedding_renorm_(weight, input, max_norm, norm_type);
22
+ }
23
+
24
+ inline Tensor embedding(
25
+ const Tensor& input,
26
+ const Tensor& weight,
27
+ c10::optional<int64_t> padding_idx,
28
+ c10::optional<double> max_norm,
29
+ double norm_type,
30
+ bool scale_grad_by_freq,
31
+ bool sparse) {
32
+ auto input_ = input;
33
+
34
+ if (padding_idx != c10::nullopt) {
35
+ if (*padding_idx > 0) {
36
+ TORCH_CHECK(
37
+ *padding_idx < weight.size(0),
38
+ "Padding_idx must be within num_embeddings");
39
+ } else if (*padding_idx < 0) {
40
+ TORCH_CHECK(
41
+ *padding_idx >= -weight.size(0),
42
+ "Padding_idx must be within num_embedding");
43
+ padding_idx = weight.size(0) + *padding_idx;
44
+ }
45
+ } else {
46
+ padding_idx = -1;
47
+ }
48
+
49
+ if (max_norm != c10::nullopt) {
50
+ input_ = input_.contiguous();
51
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
52
+ _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type);
53
+ }
54
+ return torch::embedding(
55
+ weight, input_, *padding_idx, scale_grad_by_freq, sparse);
56
+ }
57
+ } // namespace detail
58
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
59
+
60
+ /// See
61
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding
62
+ /// about the exact behavior of this functional.
63
+ ///
64
+ /// See the documentation for `torch::nn::functional::EmbeddingFuncOptions`
65
+ /// class to learn what optional arguments are supported for this functional.
66
+ ///
67
+ /// Example:
68
+ /// ```
69
+ /// namespace F = torch::nn::functional;
70
+ /// F::embedding(input, weight,
71
+ /// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true));
72
+ /// ```
73
+ inline Tensor embedding(
74
+ const Tensor& input,
75
+ const Tensor& weight,
76
+ const EmbeddingFuncOptions& options = {}) {
77
+ return detail::embedding(
78
+ input,
79
+ weight,
80
+ options.padding_idx(),
81
+ options.max_norm(),
82
+ options.norm_type(),
83
+ options.scale_grad_by_freq(),
84
+ options.sparse());
85
+ }
86
+
87
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
88
+ namespace detail {
89
+ inline Tensor embedding_bag(
90
+ const Tensor& input,
91
+ const Tensor& weight,
92
+ const Tensor& offsets,
93
+ c10::optional<double> max_norm,
94
+ double norm_type,
95
+ bool scale_grad_by_freq,
96
+ EmbeddingBagMode mode,
97
+ bool sparse,
98
+ const Tensor& per_sample_weights,
99
+ bool include_last_offset,
100
+ c10::optional<int64_t> padding_idx) {
101
+ auto input_ = input;
102
+ auto offsets_ = offsets;
103
+ auto per_sample_weights_ = per_sample_weights;
104
+ TORCH_CHECK(
105
+ !per_sample_weights_.defined() ||
106
+ input_.sizes() == per_sample_weights_.sizes(),
107
+ "embedding_bag: If per_sample_weights (",
108
+ per_sample_weights_.sizes(),
109
+ ") is not null, then it must have the same shape as the input (",
110
+ input_.sizes(),
111
+ ")");
112
+ if (input_.dim() == 2) {
113
+ TORCH_CHECK(
114
+ !offsets_.defined(),
115
+ "If input is 2D, then offsets has to be null, as input is treated is a mini-batch of fixed length sequences. However, found offsets of type Tensor");
116
+ offsets_ = torch::arange(
117
+ 0,
118
+ input_.numel(),
119
+ input_.size(1),
120
+ torch::TensorOptions().dtype(torch::kLong).device(input_.device()));
121
+ input_ = input_.reshape(-1);
122
+ if (per_sample_weights_.defined()) {
123
+ per_sample_weights_ = per_sample_weights_.reshape(-1);
124
+ }
125
+ } else if (input_.dim() == 1) {
126
+ TORCH_CHECK(
127
+ offsets_.defined(), "offsets has to be a 1D Tensor but got null");
128
+ TORCH_CHECK(offsets_.dim() == 1, "offsets has to be a 1D Tensor");
129
+ } else {
130
+ TORCH_CHECK(
131
+ false,
132
+ "input has to be 1D or 2D Tensor, but got Tensor of dimension ",
133
+ input_.dim());
134
+ }
135
+
136
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
137
+ int mode_enum;
138
+ if (std::holds_alternative<enumtype::kSum>(mode)) {
139
+ mode_enum = 0;
140
+ } else if (std::holds_alternative<enumtype::kMean>(mode)) {
141
+ mode_enum = 1;
142
+ } else if (std::holds_alternative<enumtype::kMax>(mode)) {
143
+ mode_enum = 2;
144
+ TORCH_CHECK(
145
+ !scale_grad_by_freq,
146
+ "max mode does not support scaling the gradient by the frequency");
147
+ TORCH_CHECK(!sparse, "max mode does not support sparse weights");
148
+ } else {
149
+ TORCH_CHECK(false, "mode has to be one of sum, mean or max");
150
+ }
151
+
152
+ if (max_norm != c10::nullopt) {
153
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
154
+ _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type);
155
+ }
156
+
157
+ TORCH_CHECK(
158
+ !per_sample_weights_.defined() || std::get_if<enumtype::kSum>(&mode),
159
+ "embedding_bag: per_sample_weights was not null. ",
160
+ "per_sample_weights is only supported for mode='kSum' (got mode='",
161
+ torch::enumtype::get_enum_name(mode),
162
+ "').Please open a feature request on GitHub.");
163
+
164
+ return std::get<0>(torch::embedding_bag(
165
+ weight,
166
+ input_,
167
+ offsets_,
168
+ scale_grad_by_freq,
169
+ mode_enum,
170
+ sparse,
171
+ per_sample_weights_,
172
+ include_last_offset,
173
+ padding_idx));
174
+ }
175
+ } // namespace detail
176
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
177
+
178
+ /// See
179
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding_bag
180
+ /// about the exact behavior of this functional.
181
+ ///
182
+ /// See the documentation for `torch::nn::functional::EmbeddingBagFuncOptions`
183
+ /// class to learn what optional arguments are supported for this functional.
184
+ ///
185
+ /// Example:
186
+ /// ```
187
+ /// namespace F = torch::nn::functional;
188
+ /// F::embedding_bag(input, weight,
189
+ /// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));
190
+ /// ```
191
+ inline Tensor embedding_bag(
192
+ const Tensor& input,
193
+ const Tensor& weight,
194
+ const EmbeddingBagFuncOptions& options = {}) {
195
+ return detail::embedding_bag(
196
+ input,
197
+ weight,
198
+ options.offsets(),
199
+ options.max_norm(),
200
+ options.norm_type(),
201
+ options.scale_grad_by_freq(),
202
+ options.mode(),
203
+ options.sparse(),
204
+ options.per_sample_weights(),
205
+ options.include_last_offset(),
206
+ options.padding_idx());
207
+ }
208
+
209
+ } // namespace functional
210
+ } // namespace nn
211
+ } // namespace torch
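A minimal, hypothetical usage sketch for the two embedding functionals above; the vocabulary size and index values are made up:

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  // A table of 10 embedding vectors, each of dimension 3.
  auto weight = torch::randn({10, 3});
  auto input = torch::tensor({{1, 2, 4, 5}, {4, 3, 2, 9}}, torch::kLong);

  // Plain lookup: output shape is {2, 4, 3}.
  auto emb = F::embedding(input, weight);

  // Bagged lookup: a 2D input is treated as a mini-batch of fixed-length
  // sequences (offsets must stay undefined), pooled here with the mean.
  auto bag = F::embedding_bag(
      input, weight, F::EmbeddingBagFuncOptions().mode(torch::kMean));
}
```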
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h ADDED
@@ -0,0 +1,102 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/options/fold.h>
4
+
5
+ namespace torch {
6
+ namespace nn {
7
+ namespace functional {
8
+
9
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
10
+ namespace detail {
11
+ inline Tensor fold(
12
+ const Tensor& input,
13
+ ExpandingArray<2> output_size,
14
+ ExpandingArray<2> kernel_size,
15
+ ExpandingArray<2> dilation,
16
+ ExpandingArray<2> padding,
17
+ ExpandingArray<2> stride) {
18
+ if (input.dim() == 3 || input.dim() == 2) {
19
+ return torch::col2im(
20
+ input, output_size, kernel_size, dilation, padding, stride);
21
+ } else {
22
+ TORCH_CHECK(
23
+ false,
24
+ "Input Error: Only unbatched (2D) or batched (3D) input Tensors are supported "
25
+ "(got ",
26
+ input.dim(),
27
+ "D)");
28
+ }
29
+ }
30
+ } // namespace detail
31
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
32
+
33
+ /// See
34
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.fold
35
+ /// about the exact behavior of this functional.
36
+ ///
37
+ /// See the documentation for `torch::nn::functional::FoldFuncOptions` class to
38
+ /// learn what optional arguments are supported for this functional.
39
+ ///
40
+ /// Example:
41
+ /// ```
42
+ /// namespace F = torch::nn::functional;
43
+ /// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2}));
44
+ /// ```
45
+ inline Tensor fold(const Tensor& input, const FoldFuncOptions& options) {
46
+ return detail::fold(
47
+ input,
48
+ options.output_size(),
49
+ options.kernel_size(),
50
+ options.dilation(),
51
+ options.padding(),
52
+ options.stride());
53
+ }
54
+
55
+ // ============================================================================
56
+
57
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
58
+ namespace detail {
59
+ inline Tensor unfold(
60
+ const Tensor& input,
61
+ ExpandingArray<2> kernel_size,
62
+ ExpandingArray<2> dilation,
63
+ ExpandingArray<2> padding,
64
+ ExpandingArray<2> stride) {
65
+ if (input.dim() == 4) {
66
+ return torch::im2col(input, kernel_size, dilation, padding, stride);
67
+ } else {
68
+ TORCH_CHECK(
69
+ false,
70
+ "Input Error: Only 4D input Tensors are supported "
71
+ "(got ",
72
+ input.dim(),
73
+ "D)");
74
+ }
75
+ }
76
+ } // namespace detail
77
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
78
+
79
+ /// See
80
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.unfold
81
+ /// about the exact behavior of this functional.
82
+ ///
83
+ /// See the documentation for `torch::nn::functional::UnfoldFuncOptions` class
84
+ /// to learn what optional arguments are supported for this functional.
85
+ ///
86
+ /// Example:
87
+ /// ```
88
+ /// namespace F = torch::nn::functional;
89
+ /// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2));
90
+ /// ```
91
+ inline Tensor unfold(const Tensor& input, const UnfoldFuncOptions& options) {
92
+ return detail::unfold(
93
+ input,
94
+ options.kernel_size(),
95
+ options.dilation(),
96
+ options.padding(),
97
+ options.stride());
98
+ }
99
+
100
+ } // namespace functional
101
+ } // namespace nn
102
+ } // namespace torch
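A minimal sketch of an unfold/fold round trip using the options shown above; all shapes are illustrative:

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  // Batched 4D input: (N=1, C=2, H=4, W=4).
  auto x = torch::randn({1, 2, 4, 4});

  // Extract sliding 2x2 patches -> (1, C * 2 * 2, L) with L = 3 * 3 = 9.
  auto patches = F::unfold(x, F::UnfoldFuncOptions({2, 2}));

  // Reassemble to the original spatial size. Overlapping positions are
  // summed, so fold is not an exact inverse of unfold in general.
  auto y = F::fold(patches, F::FoldFuncOptions({4, 4}, {2, 2}));
}
```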
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h ADDED
@@ -0,0 +1,63 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/options/instancenorm.h>
4
+
5
+ namespace torch {
6
+ namespace nn {
7
+ namespace functional {
8
+
9
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
10
+ namespace detail {
11
+ inline Tensor instance_norm(
12
+ const Tensor& input,
13
+ const Tensor& running_mean,
14
+ const Tensor& running_var,
15
+ const Tensor& weight,
16
+ const Tensor& bias,
17
+ bool use_input_stats,
18
+ double momentum,
19
+ double eps) {
20
+ return torch::instance_norm(
21
+ input,
22
+ weight,
23
+ bias,
24
+ running_mean,
25
+ running_var,
26
+ use_input_stats,
27
+ momentum,
28
+ eps,
29
+ at::globalContext().userEnabledCuDNN());
30
+ }
31
+ } // namespace detail
32
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
33
+
34
+ /// See
35
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.instance_norm
36
+ /// about the exact behavior of this functional.
37
+ ///
38
+ /// See the documentation for `torch::nn::functional::InstanceNormFuncOptions`
39
+ /// class to learn what optional arguments are supported for this functional.
40
+ ///
41
+ /// Example:
42
+ /// ```
43
+ /// namespace F = torch::nn::functional;
44
+ /// F::instance_norm(input,
45
+ /// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5));
46
+ /// ```
47
+ inline Tensor instance_norm(
48
+ const Tensor& input,
49
+ const InstanceNormFuncOptions& options = {}) {
50
+ return detail::instance_norm(
51
+ input,
52
+ options.running_mean(),
53
+ options.running_var(),
54
+ options.weight(),
55
+ options.bias(),
56
+ options.use_input_stats(),
57
+ options.momentum(),
58
+ options.eps());
59
+ }
60
+
61
+ } // namespace functional
62
+ } // namespace nn
63
+ } // namespace torch
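A minimal sketch of calling the instance-norm functional above; with the affine parameters and running statistics left unset, per-sample statistics are used:

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  // A batch of 3-channel feature maps: (N, C, H, W).
  auto x = torch::randn({4, 3, 8, 8});

  // Normalize each (sample, channel) plane using its own mean and variance.
  auto y = F::instance_norm(x, F::InstanceNormFuncOptions().eps(1e-5));
}
```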
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h ADDED
@@ -0,0 +1,37 @@
1
+ #pragma once
2
+
3
+ #include <torch/types.h>
4
+
5
+ namespace torch {
6
+ namespace nn {
7
+ namespace functional {
8
+
9
+ inline Tensor bilinear(
10
+ const Tensor& input1,
11
+ const Tensor& input2,
12
+ const Tensor& weight,
13
+ const Tensor& bias = Tensor()) {
14
+ return torch::bilinear(input1, input2, weight, bias);
15
+ }
16
+
17
+ // ============================================================================
18
+
19
+ inline Tensor linear(
20
+ const Tensor& input,
21
+ const Tensor& weight,
22
+ const Tensor& bias = {}) {
23
+ if (input.dim() == 2 && bias.defined()) {
24
+ // fused op is marginally faster
25
+ return torch::addmm(bias, input, weight.t());
26
+ } else {
27
+ auto output = input.matmul(weight.t());
28
+ if (bias.defined()) {
29
+ output += bias;
30
+ }
31
+ return output;
32
+ }
33
+ }
34
+
35
+ } // namespace functional
36
+ } // namespace nn
37
+ } // namespace torch
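A minimal sketch of `linear` and `bilinear` with the weight layout implied above (`(out_features, in_features)` for `linear`); all sizes are illustrative:

```cpp
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;

  auto input = torch::randn({128, 20});  // (batch, in_features)
  auto weight = torch::randn({30, 20});  // (out_features, in_features)
  auto bias = torch::randn({30});

  // 2D input with a defined bias takes the fused addmm path shown above.
  auto out = F::linear(input, weight, bias);  // (128, 30)

  // Bilinear form over two inputs, with the weight of shape (out, in1, in2).
  auto x1 = torch::randn({128, 10});
  auto x2 = torch::randn({128, 12});
  auto w = torch::randn({5, 10, 12});
  auto y = F::bilinear(x1, x2, w);  // (128, 5)
}
```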
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h ADDED
@@ -0,0 +1,1044 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ExpandUtils.h>
4
+ #include <torch/nn/functional/activation.h>
5
+ #include <torch/nn/options/loss.h>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+ namespace functional {
10
+
11
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
12
+ namespace detail {
13
+ inline Tensor l1_loss(
14
+ const Tensor& input,
15
+ const Tensor& target,
16
+ L1LossFuncOptions::reduction_t reduction) {
17
+ return torch::l1_loss(input, target, enumtype::reduction_get_enum(reduction));
18
+ }
19
+ } // namespace detail
20
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
21
+
22
+ /// See
23
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.l1_loss
24
+ /// about the exact behavior of this functional.
25
+ ///
26
+ /// See the documentation for `torch::nn::functional::L1LossFuncOptions` class
27
+ /// to learn what optional arguments are supported for this functional.
28
+ ///
29
+ /// Example:
30
+ /// ```
31
+ /// namespace F = torch::nn::functional;
32
+ /// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
33
+ /// ```
34
+ inline Tensor l1_loss(
35
+ const Tensor& input,
36
+ const Tensor& target,
37
+ const L1LossFuncOptions& options = {}) {
38
+ return detail::l1_loss(input, target, options.reduction());
39
+ }
40
+
41
+ // ============================================================================
42
+
43
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
44
+ namespace detail {
45
+ inline Tensor kl_div(
46
+ const Tensor& input,
47
+ const Tensor& target,
48
+ KLDivFuncOptions::reduction_t reduction,
49
+ bool log_target = false) {
50
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
51
+ torch::Reduction::Reduction reduction_enum;
52
+
53
+ if (std::holds_alternative<enumtype::kMean>(reduction)) {
54
+ TORCH_WARN(
55
+ "reduction: 'mean' divides the total loss by both the batch size and the support size."
56
+ "'batchmean' divides only by the batch size, and aligns with the KL div math definition."
57
+ "'mean' will be changed to behave the same as 'batchmean' in the next major release.");
58
+ }
59
+
60
+ // special case for batchmean
61
+ if (std::holds_alternative<enumtype::kBatchMean>(reduction)) {
62
+ reduction_enum = torch::Reduction::Sum;
63
+ } else {
64
+ reduction_enum = enumtype::reduction_get_enum(reduction);
65
+ }
66
+
67
+ auto reduced = torch::kl_div(input, target, reduction_enum, log_target);
68
+
69
+ if (std::holds_alternative<enumtype::kBatchMean>(reduction) &&
70
+ input.dim() != 0) {
71
+ reduced = reduced / input.sizes()[0];
72
+ }
73
+
74
+ return reduced;
75
+ }
76
+ } // namespace detail
77
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
78
+
79
+ /// See
80
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.kl_div
81
+ /// about the exact behavior of this functional.
82
+ ///
83
+ /// See the documentation for `torch::nn::functional::KLDivFuncOptions` class to
84
+ /// learn what optional arguments are supported for this functional.
85
+ ///
86
+ /// Example:
87
+ /// ```
88
+ /// namespace F = torch::nn::functional;
89
+ /// F::kl_div(input, target,
90
+ /// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
91
+ /// ```
92
+ inline Tensor kl_div(
93
+ const Tensor& input,
94
+ const Tensor& target,
95
+ const KLDivFuncOptions& options = {}) {
96
+ return detail::kl_div(
97
+ input, target, options.reduction(), options.log_target());
98
+ }
99
+
100
+ // ============================================================================
101
+
102
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
103
+ namespace detail {
104
+ inline Tensor mse_loss(
105
+ const Tensor& input,
106
+ const Tensor& target,
107
+ MSELossFuncOptions::reduction_t reduction) {
108
+ if (!(target.sizes() == input.sizes())) {
109
+ TORCH_WARN(
110
+ "Using a target size (",
111
+ target.sizes(),
112
+ ") that is different to the input size (",
113
+ input.sizes(),
114
+ "). ",
115
+ "This will likely lead to incorrect results due to broadcasting. ",
116
+ "Please ensure they have the same size.");
117
+ }
118
+ std::vector<torch::Tensor> broadcast_tensors =
119
+ torch::broadcast_tensors({input, target});
120
+ auto expanded_input = broadcast_tensors[0];
121
+ auto expanded_target = broadcast_tensors[1];
122
+ return torch::mse_loss(
123
+ expanded_input, expanded_target, enumtype::reduction_get_enum(reduction));
124
+ }
125
+ } // namespace detail
126
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
127
+
128
+ /// See
129
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.mse_loss
130
+ /// about the exact behavior of this functional.
131
+ ///
132
+ /// See the documentation for `torch::nn::functional::MSELossFuncOptions` class
133
+ /// to learn what optional arguments are supported for this functional.
134
+ ///
135
+ /// Example:
136
+ /// ```
137
+ /// namespace F = torch::nn::functional;
138
+ /// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
139
+ /// ```
140
+ inline Tensor mse_loss(
141
+ const Tensor& input,
142
+ const Tensor& target,
143
+ const MSELossFuncOptions& options = {}) {
144
+ return detail::mse_loss(input, target, options.reduction());
145
+ }
146
+
147
+ // ============================================================================
148
+
149
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
150
+ namespace detail {
151
+ inline Tensor binary_cross_entropy(
152
+ const Tensor& input,
153
+ const Tensor& target,
154
+ const Tensor& weight,
155
+ BinaryCrossEntropyFuncOptions::reduction_t reduction) {
156
+ auto reduction_enum = enumtype::reduction_get_enum(reduction);
157
+
158
+ if (target.sizes() != input.sizes()) {
159
+ TORCH_CHECK(
160
+ false,
161
+ "Using a target size (",
162
+ target.sizes(),
163
+ ") ",
164
+ "that is different to the input size (",
165
+ input.sizes(),
166
+ ") is deprecated. ",
167
+ "Please ensure they have the same size.");
168
+ }
169
+
170
+ auto weight_ = weight;
171
+ if (weight_.defined()) {
172
+ auto new_size = at::infer_size(target.sizes(), weight_.sizes());
173
+ weight_ = weight_.expand(new_size);
174
+ }
175
+
176
+ return torch::binary_cross_entropy(input, target, weight_, reduction_enum);
177
+ }
178
+ } // namespace detail
179
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
180
+
181
+ /// See
182
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy
183
+ /// about the exact behavior of this functional.
184
+ ///
185
+ /// See the documentation for
186
+ /// `torch::nn::functional::BinaryCrossEntropyFuncOptions` class to learn what
187
+ /// optional arguments are supported for this functional.
188
+ ///
189
+ /// Example:
190
+ /// ```
191
+ /// namespace F = torch::nn::functional;
192
+ /// F::binary_cross_entropy(input, target,
193
+ /// F::BinaryCrossEntropyFuncOptions().weight(weight));
194
+ /// ```
195
+ inline Tensor binary_cross_entropy(
196
+ const Tensor& input,
197
+ const Tensor& target,
198
+ const BinaryCrossEntropyFuncOptions& options = {}) {
199
+ return detail::binary_cross_entropy(
200
+ input, target, options.weight(), options.reduction());
201
+ }
202
+
203
+ // ============================================================================
204
+
205
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
206
+ namespace detail {
207
+ inline Tensor hinge_embedding_loss(
208
+ const Tensor& input,
209
+ const Tensor& target,
210
+ double margin,
211
+ HingeEmbeddingLossFuncOptions::reduction_t reduction) {
212
+ return torch::hinge_embedding_loss(
213
+ input, target, margin, enumtype::reduction_get_enum(reduction));
214
+ }
215
+ } // namespace detail
216
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
217
+
218
+ /// See
219
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hinge_embedding_loss
220
+ /// about the exact behavior of this functional.
221
+ ///
222
+ /// See the documentation for
223
+ /// `torch::nn::functional::HingeEmbeddingLossFuncOptions` class to learn what
224
+ /// optional arguments are supported for this functional.
225
+ ///
226
+ /// Example:
227
+ /// ```
228
+ /// namespace F = torch::nn::functional;
229
+ /// F::hinge_embedding_loss(input, target,
230
+ /// F::HingeEmbeddingLossFuncOptions().margin(2));
231
+ /// ```
232
+ inline Tensor hinge_embedding_loss(
233
+ const Tensor& input,
234
+ const Tensor& target,
235
+ const HingeEmbeddingLossFuncOptions& options = {}) {
236
+ return detail::hinge_embedding_loss(
237
+ input, target, options.margin(), options.reduction());
238
+ }
239
+
240
+ // ============================================================================
241
+
242
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
243
+ namespace detail {
244
+ inline Tensor multi_margin_loss(
245
+ const Tensor& input,
246
+ const Tensor& target,
247
+ int64_t p,
248
+ double margin,
249
+ const Tensor& weight,
250
+ MultiMarginLossFuncOptions::reduction_t reduction) {
251
+ TORCH_CHECK(p == 1 || p == 2, "only p == 1 and p == 2 supported");
252
+ if (weight.defined()) {
253
+ TORCH_CHECK(weight.dim() == 1, "weight must be one-dimensional");
254
+ }
255
+
256
+ return torch::multi_margin_loss(
257
+ input,
258
+ target,
259
+ p,
260
+ margin,
261
+ weight,
262
+ enumtype::reduction_get_enum(reduction));
263
+ }
264
+ } // namespace detail
265
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
266
+
267
+ /// See
268
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multi_margin_loss
269
+ /// about the exact behavior of this functional.
270
+ ///
271
+ /// See the documentation for
272
+ /// `torch::nn::functional::MultiMarginLossFuncOptions` class to learn what
273
+ /// optional arguments are supported for this functional.
274
+ ///
275
+ /// Example:
276
+ /// ```
277
+ /// namespace F = torch::nn::functional;
278
+ /// F::multi_margin_loss(input, target,
279
+ /// F::MultiMarginLossFuncOptions().margin(2).weight(weight));
280
+ /// ```
281
+ inline Tensor multi_margin_loss(
282
+ const Tensor& input,
283
+ const Tensor& target,
284
+ const MultiMarginLossFuncOptions& options = {}) {
285
+ return detail::multi_margin_loss(
286
+ input,
287
+ target,
288
+ options.p(),
289
+ options.margin(),
290
+ options.weight(),
291
+ options.reduction());
292
+ }
293
+
294
+ // ============================================================================
295
+
296
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
297
+ namespace detail {
298
+ inline Tensor cosine_embedding_loss(
299
+ const Tensor& input1,
300
+ const Tensor& input2,
301
+ const Tensor& target,
302
+ double margin,
303
+ CosineEmbeddingLossFuncOptions::reduction_t reduction) {
304
+ return torch::cosine_embedding_loss(
305
+ input1, input2, target, margin, enumtype::reduction_get_enum(reduction));
306
+ }
307
+ } // namespace detail
308
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
309
+
310
+ /// See
311
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_embedding_loss
312
+ /// about the exact behavior of this functional.
313
+ ///
314
+ /// See the documentation for
315
+ /// `torch::nn::functional::CosineEmbeddingLossFuncOptions` class to learn what
316
+ /// optional arguments are supported for this functional.
317
+ ///
318
+ /// Example:
319
+ /// ```
320
+ /// namespace F = torch::nn::functional;
321
+ /// F::cosine_embedding_loss(input1, input2, target,
322
+ /// F::CosineEmbeddingLossFuncOptions().margin(0.5));
323
+ /// ```
324
+ inline Tensor cosine_embedding_loss(
325
+ const Tensor& input1,
326
+ const Tensor& input2,
327
+ const Tensor& target,
328
+ const CosineEmbeddingLossFuncOptions& options = {}) {
329
+ return detail::cosine_embedding_loss(
330
+ input1, input2, target, options.margin(), options.reduction());
331
+ }
332
+
333
+ // ============================================================================
334
+
335
+ inline Tensor _smooth_l1_loss(
336
+ const Tensor& input,
337
+ const Tensor& target,
338
+ double beta = 1.) {
339
+ auto t = torch::abs(input - target);
340
+ return torch::where(t < beta, 0.5 * torch::pow(t, 2) / beta, t - 0.5 * beta);
341
+ }
342
+
343
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
344
+ namespace detail {
345
+ inline Tensor smooth_l1_loss(
346
+ const Tensor& input,
347
+ const Tensor& target,
348
+ SmoothL1LossFuncOptions::reduction_t reduction,
349
+ c10::optional<double> beta_opt = c10::nullopt) {
350
+ if (target.sizes() != input.sizes()) {
351
+ TORCH_WARN(
352
+ "Using a target size (",
353
+ target.sizes(),
354
+ ") that is different to the input size (",
355
+ input.sizes(),
356
+ "). ",
357
+ "This will likely lead to incorrect results due to broadcasting. ",
358
+ "Please ensure they have the same size.");
359
+ }
360
+ double beta = beta_opt.value_or(1.0);
361
+
362
+ std::vector<Tensor> expanded_tensors =
363
+ torch::broadcast_tensors({input, target});
364
+ return torch::smooth_l1_loss(
365
+ expanded_tensors[0],
366
+ expanded_tensors[1],
367
+ enumtype::reduction_get_enum(reduction),
368
+ beta);
369
+ }
370
+ } // namespace detail
371
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
372
+
373
+ /// See
374
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss
375
+ /// about the exact behavior of this functional.
376
+ ///
377
+ /// See the documentation for `torch::nn::functional::SmoothL1LossFuncOptions`
378
+ /// class to learn what optional arguments are supported for this functional.
379
+ ///
380
+ /// Example:
381
+ /// ```
382
+ /// namespace F = torch::nn::functional;
383
+ /// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
384
+ /// ```
385
+ inline Tensor smooth_l1_loss(
386
+ const Tensor& input,
387
+ const Tensor& target,
388
+ const SmoothL1LossFuncOptions& options = {}) {
389
+ return detail::smooth_l1_loss(
390
+ input, target, options.reduction(), options.beta());
391
+ }
392
+
393
+ /// See
394
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss
395
+ /// about the exact behavior of this functional.
396
+ ///
397
+ /// Example:
398
+ /// ```
399
+ /// namespace F = torch::nn::functional;
400
+ /// F::smooth_l1_loss(input, target, /*options=*/torch::kNone, /*beta=*/0.5);
401
+ /// ```
402
+ inline Tensor smooth_l1_loss(
403
+ const Tensor& input,
404
+ const Tensor& target,
405
+ const SmoothL1LossFuncOptions& options,
406
+ double beta) {
407
+ TORCH_CHECK(
408
+ options.beta() == c10::nullopt,
409
+ "expected beta not to be provided in 'options', but got ",
410
+ options.beta().value());
411
+ return detail::smooth_l1_loss(input, target, options.reduction(), beta);
412
+ }
413
+
414
+ // ============================================================================
415
+
416
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
417
+ namespace detail {
418
+ inline Tensor huber_loss(
419
+ const Tensor& input,
420
+ const Tensor& target,
421
+ HuberLossFuncOptions::reduction_t reduction,
422
+ double delta = 1.) {
423
+ if (target.sizes() != input.sizes()) {
424
+ TORCH_WARN(
425
+ "Using a target size (",
426
+ target.sizes(),
427
+ ") that is different to the input size (",
428
+ input.sizes(),
429
+ "). ",
430
+ "This will likely lead to incorrect results due to broadcasting. ",
431
+ "Please ensure they have the same size.");
432
+ }
433
+
434
+ std::vector<Tensor> expanded_tensors =
435
+ torch::broadcast_tensors({input, target});
436
+ return torch::huber_loss(
437
+ expanded_tensors[0],
438
+ expanded_tensors[1],
439
+ enumtype::reduction_get_enum(reduction),
440
+ delta);
441
+ }
442
+ } // namespace detail
443
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
444
+
445
+ /// See
446
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.huber_loss
447
+ /// about the exact behavior of this functional.
448
+ ///
449
+ /// See the documentation for `torch::nn::functional::HuberLossFuncOptions`
450
+ /// class to learn what optional arguments are supported for this functional.
451
+ ///
452
+ /// Example:
453
+ /// ```
454
+ /// namespace F = torch::nn::functional;
455
+ /// F::huber_loss(input, target,
456
+ /// F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5));
457
+ /// ```
458
+ inline Tensor huber_loss(
459
+ const Tensor& input,
460
+ const Tensor& target,
461
+ const HuberLossFuncOptions& options = {}) {
462
+ return detail::huber_loss(
463
+ input, target, options.reduction(), options.delta());
464
+ }
465
+
466
+ // ============================================================================
467
+
468
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
469
+ namespace detail {
470
+ inline Tensor multilabel_margin_loss(
471
+ const Tensor& input,
472
+ const Tensor& target,
473
+ MultilabelMarginLossFuncOptions::reduction_t reduction) {
474
+ return torch::multilabel_margin_loss(
475
+ input, target, enumtype::reduction_get_enum(reduction));
476
+ }
477
+ } // namespace detail
478
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
479
+
480
+ /// See
481
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_margin_loss
482
+ /// about the exact behavior of this functional.
483
+ ///
484
+ /// See the documentation for
485
+ /// `torch::nn::functional::MultilabelMarginLossFuncOptions` class to learn what
486
+ /// optional arguments are supported for this functional.
487
+ ///
488
+ /// Example:
489
+ /// ```
490
+ /// namespace F = torch::nn::functional;
491
+ /// F::multilabel_margin_loss(input, target,
492
+ /// F::MultilabelMarginLossFuncOptions(torch::kNone));
493
+ /// ```
494
+ inline Tensor multilabel_margin_loss(
495
+ const Tensor& input,
496
+ const Tensor& target,
497
+ const MultilabelMarginLossFuncOptions& options = {}) {
498
+ return detail::multilabel_margin_loss(input, target, options.reduction());
499
+ }
500
+
501
+ // ============================================================================
502
+
503
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
504
+ namespace detail {
505
+ inline Tensor soft_margin_loss(
506
+ const Tensor& input,
507
+ const Tensor& target,
508
+ SoftMarginLossFuncOptions::reduction_t reduction) {
509
+ return torch::soft_margin_loss(
510
+ input, target, enumtype::reduction_get_enum(reduction));
511
+ }
512
+ } // namespace detail
513
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
514
+
515
+ /// See
516
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.soft_margin_loss
517
+ /// about the exact behavior of this functional.
518
+ ///
519
+ /// See the documentation for `torch::nn::functional::SoftMarginLossFuncOptions`
520
+ /// class to learn what optional arguments are supported for this functional.
521
+ ///
522
+ /// Example:
523
+ /// ```
524
+ /// namespace F = torch::nn::functional;
525
+ /// F::soft_margin_loss(input, target,
526
+ /// F::SoftMarginLossFuncOptions(torch::kNone));
527
+ /// ```
528
+ inline Tensor soft_margin_loss(
529
+ const Tensor& input,
530
+ const Tensor& target,
531
+ const SoftMarginLossFuncOptions& options = {}) {
532
+ return detail::soft_margin_loss(input, target, options.reduction());
533
+ }
534
+
535
+ // ============================================================================
536
+
537
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
538
+ namespace detail {
539
+ inline Tensor multilabel_soft_margin_loss(
540
+ const Tensor& input,
541
+ const Tensor& target,
542
+ const Tensor& weight,
543
+ MultilabelSoftMarginLossFuncOptions::reduction_t reduction) {
544
+ auto loss =
545
+ -(target * torch::log_sigmoid(input) +
546
+ (1 - target) * torch::log_sigmoid(-input));
547
+ if (weight.defined()) {
548
+ loss = loss * weight;
549
+ }
550
+
551
+ auto class_dim = input.dim() - 1;
552
+ auto C = input.size(class_dim);
553
+ loss = loss.sum(class_dim) / C; // only return N loss values
554
+
555
+ Tensor ret;
556
+
557
+ if (std::holds_alternative<enumtype::kNone>(reduction)) {
558
+ ret = loss;
559
+ } else if (std::holds_alternative<enumtype::kMean>(reduction)) {
560
+ ret = loss.mean();
561
+ } else if (std::holds_alternative<enumtype::kSum>(reduction)) {
562
+ ret = loss.sum();
563
+ } else {
564
+ ret = input;
565
+ TORCH_INTERNAL_ASSERT(
566
+ false, enumtype::get_enum_name(reduction), " is not valid");
567
+ }
568
+ return ret;
569
+ }
570
+ } // namespace detail
571
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
572
+
573
+ /// See
574
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_soft_margin_loss
575
+ /// about the exact behavior of this functional.
576
+ ///
577
+ /// See the documentation for
578
+ /// `torch::nn::functional::MultilabelSoftMarginLossFuncOptions` class to learn
579
+ /// what optional arguments are supported for this functional.
580
+ ///
581
+ /// Example:
582
+ /// ```
583
+ /// namespace F = torch::nn::functional;
584
+ /// F::multilabel_soft_margin_loss(input, target,
585
+ /// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
586
+ /// ```
587
+ inline Tensor multilabel_soft_margin_loss(
588
+ const Tensor& input,
589
+ const Tensor& target,
590
+ const MultilabelSoftMarginLossFuncOptions& options = {}) {
591
+ return detail::multilabel_soft_margin_loss(
592
+ input, target, options.weight(), options.reduction());
593
+ }
594
+
595
+ // ============================================================================
596
+
597
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
598
+ namespace detail {
599
+ inline Tensor triplet_margin_loss(
600
+ const Tensor& anchor,
601
+ const Tensor& positive,
602
+ const Tensor& negative,
603
+ double margin,
604
+ double p,
605
+ double eps,
606
+ bool swap,
607
+ TripletMarginLossFuncOptions::reduction_t reduction) {
608
+ return torch::triplet_margin_loss(
609
+ anchor,
610
+ positive,
611
+ negative,
612
+ margin,
613
+ p,
614
+ eps,
615
+ swap,
616
+ enumtype::reduction_get_enum(reduction));
617
+ }
618
+ } // namespace detail
619
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
620
+
621
+ /// See
622
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_loss
623
+ /// about the exact behavior of this functional.
624
+ ///
625
+ /// See the documentation for
626
+ /// `torch::nn::functional::TripletMarginLossFuncOptions` class to learn what
627
+ /// optional arguments are supported for this functional.
628
+ ///
629
+ /// Example:
630
+ /// ```
631
+ /// namespace F = torch::nn::functional;
632
+ /// F::triplet_margin_loss(anchor, positive, negative,
633
+ /// F::TripletMarginLossFuncOptions().margin(1.0));
634
+ /// ```
635
+ inline Tensor triplet_margin_loss(
636
+ const Tensor& anchor,
637
+ const Tensor& positive,
638
+ const Tensor& negative,
639
+ const TripletMarginLossFuncOptions& options = {}) {
640
+ return detail::triplet_margin_loss(
641
+ anchor,
642
+ positive,
643
+ negative,
644
+ options.margin(),
645
+ options.p(),
646
+ options.eps(),
647
+ options.swap(),
648
+ options.reduction());
649
+ }
650
+
651
+ // ============================================================================
652
+
653
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
654
+ namespace detail {
655
+ inline Tensor triplet_margin_with_distance_loss(
656
+ const Tensor& anchor,
657
+ const Tensor& positive,
658
+ const Tensor& negative,
659
+ c10::optional<TripletMarginWithDistanceLossFuncOptions::distance_function_t>
660
+ distance_function,
661
+ double margin,
662
+ bool swap,
663
+ TripletMarginWithDistanceLossFuncOptions::reduction_t reduction) {
664
+ Tensor dist_pos, dist_neg;
665
+ if (distance_function.has_value()) {
666
+ auto distance_function_impl = distance_function.value();
667
+ dist_pos = distance_function_impl(anchor, positive);
668
+ dist_neg = distance_function_impl(anchor, negative);
669
+ } else {
670
+ dist_pos = pairwise_distance(anchor, positive);
671
+ dist_neg = pairwise_distance(anchor, negative);
672
+ }
673
+
674
+ if (swap) {
675
+ Tensor dist_swap;
676
+ if (distance_function.has_value()) {
677
+ dist_swap = distance_function.value()(positive, negative);
678
+ } else {
679
+ dist_swap = pairwise_distance(positive, negative);
680
+ }
681
+ dist_neg = torch::min(dist_neg, dist_swap);
682
+ }
683
+
684
+ auto loss = torch::clamp_min(dist_pos - dist_neg + margin, 0);
685
+
686
+ Tensor ret;
687
+ if (std::holds_alternative<enumtype::kNone>(reduction)) {
688
+ ret = loss;
689
+ } else if (std::holds_alternative<enumtype::kMean>(reduction)) {
690
+ ret = loss.mean();
691
+ } else if (std::holds_alternative<enumtype::kSum>(reduction)) {
692
+ ret = loss.sum();
693
+ } else {
694
+ ret = anchor;
695
+ TORCH_INTERNAL_ASSERT(
696
+ false, enumtype::get_enum_name(reduction), " is not valid");
697
+ }
698
+ return ret;
699
+ }
700
+ } // namespace detail
701
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
702
+
703
+ /// See
704
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_with_distance_loss
705
+ /// about the exact behavior of this functional.
706
+ ///
707
+ /// See the documentation for
708
+ /// `torch::nn::functional::TripletMarginWithDistanceLossFuncOptions` class to
709
+ /// learn what optional arguments are supported for this functional.
710
+ ///
711
+ /// Example:
712
+ /// ```
713
+ /// namespace F = torch::nn::functional;
714
+ /// F::triplet_margin_with_distance_loss(anchor, positive, negative,
715
+ /// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
716
+ /// ```
717
+ inline Tensor triplet_margin_with_distance_loss(
718
+ const Tensor& anchor,
719
+ const Tensor& positive,
720
+ const Tensor& negative,
721
+ const TripletMarginWithDistanceLossFuncOptions& options = {}) {
722
+ return detail::triplet_margin_with_distance_loss(
723
+ anchor,
724
+ positive,
725
+ negative,
726
+ options.distance_function(),
727
+ options.margin(),
728
+ options.swap(),
729
+ options.reduction());
730
+ }
731
+
732
+ // ============================================================================
733
+
734
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
735
+ namespace detail {
736
+ inline Tensor ctc_loss(
737
+ const Tensor& log_probs,
738
+ const Tensor& targets,
739
+ const Tensor& input_lengths,
740
+ const Tensor& target_lengths,
741
+ int64_t blank,
742
+ CTCLossFuncOptions::reduction_t reduction,
743
+ bool zero_infinity) {
744
+ return torch::ctc_loss(
745
+ log_probs,
746
+ targets,
747
+ input_lengths,
748
+ target_lengths,
749
+ blank,
750
+ enumtype::reduction_get_enum(reduction),
751
+ zero_infinity);
752
+ }
753
+ } // namespace detail
754
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
755
+
756
+ /// See
757
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.ctc_loss
758
+ /// about the exact behavior of this functional.
759
+ ///
760
+ /// See the documentation for `torch::nn::functional::CTCLossFuncOptions` class
761
+ /// to learn what optional arguments are supported for this functional.
762
+ ///
763
+ /// Example:
764
+ /// ```
765
+ /// namespace F = torch::nn::functional;
766
+ /// F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
767
+ /// F::CTCLossFuncOptions().reduction(torch::kNone));
768
+ /// ```
769
+ inline Tensor ctc_loss(
770
+ const Tensor& log_probs,
771
+ const Tensor& targets,
772
+ const Tensor& input_lengths,
773
+ const Tensor& target_lengths,
774
+ const CTCLossFuncOptions& options = {}) {
775
+ return detail::ctc_loss(
776
+ log_probs,
777
+ targets,
778
+ input_lengths,
779
+ target_lengths,
780
+ options.blank(),
781
+ options.reduction(),
782
+ options.zero_infinity());
783
+ }
784
+
785
+ // ============================================================================
786
+
787
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
788
+ namespace detail {
789
+ inline Tensor poisson_nll_loss(
790
+ const Tensor& input,
791
+ const Tensor& target,
792
+ bool log_input,
793
+ bool full,
794
+ double eps,
795
+ PoissonNLLLossFuncOptions::reduction_t reduction) {
796
+ return torch::poisson_nll_loss(
797
+ input,
798
+ target,
799
+ log_input,
800
+ full,
801
+ eps,
802
+ enumtype::reduction_get_enum(reduction));
803
+ }
804
+ } // namespace detail
805
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
806
+
807
+ /// See
808
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.poisson_nll_loss
809
+ /// about the exact behavior of this functional.
810
+ ///
811
+ /// See the documentation for `torch::nn::functional::PoissonNLLLossFuncOptions`
812
+ /// class to learn what optional arguments are supported for this functional.
813
+ ///
814
+ /// Example:
815
+ /// ```
816
+ /// namespace F = torch::nn::functional;
817
+ /// F::poisson_nll_loss(input, target,
818
+ /// F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
819
+ /// ```
820
+ inline Tensor poisson_nll_loss(
821
+ const Tensor& input,
822
+ const Tensor& target,
823
+ const PoissonNLLLossFuncOptions& options = {}) {
824
+ return detail::poisson_nll_loss(
825
+ input,
826
+ target,
827
+ options.log_input(),
828
+ options.full(),
829
+ options.eps(),
830
+ options.reduction());
831
+ }
832
+
833
+ // ============================================================================
834
+
835
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
836
+ namespace detail {
837
+ inline Tensor margin_ranking_loss(
838
+ const Tensor& input1,
839
+ const Tensor& input2,
840
+ const Tensor& target,
841
+ double margin,
842
+ MarginRankingLossFuncOptions::reduction_t reduction) {
843
+ TORCH_CHECK(
844
+ input1.dim() == input2.dim() && input1.dim() == target.dim(),
845
+ "margin_ranking_loss : All input tensors should have same dimension but got sizes: "
846
+ "input1: ",
847
+ input1.sizes(),
848
+ ", input2: ",
849
+ input2.sizes(),
850
+ ", target: ",
851
+ target.sizes());
852
+ return torch::margin_ranking_loss(
853
+ input1, input2, target, margin, enumtype::reduction_get_enum(reduction));
854
+ }
855
+ } // namespace detail
856
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
857
+
858
+ /// See
859
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.margin_ranking_loss
860
+ /// about the exact behavior of this functional.
861
+ ///
862
+ /// See the documentation for
863
+ /// `torch::nn::functional::MarginRankingLossFuncOptions` class to learn what
864
+ /// optional arguments are supported for this functional.
865
+ ///
866
+ /// Example:
867
+ /// ```
868
+ /// namespace F = torch::nn::functional;
869
+ /// F::margin_ranking_loss(input1, input2, target,
870
+ /// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
871
+ /// ```
872
+ inline Tensor margin_ranking_loss(
873
+ const Tensor& input1,
874
+ const Tensor& input2,
875
+ const Tensor& target,
876
+ const MarginRankingLossFuncOptions& options = {}) {
877
+ return detail::margin_ranking_loss(
878
+ input1, input2, target, options.margin(), options.reduction());
879
+ }
880
+
881
+ // ============================================================================
882
+
883
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
884
+ namespace detail {
885
+ inline Tensor nll_loss(
886
+ const Tensor& input,
887
+ const Tensor& target,
888
+ const Tensor& weight,
889
+ int64_t ignore_index,
890
+ const NLLLossFuncOptions::reduction_t reduction) {
891
+ if (input.dim() < 2) {
892
+ TORCH_CHECK(false, "Expected 2 or more dimensions (got ", input.dim(), ")");
893
+ }
894
+
895
+ if (input.sizes()[0] != target.sizes()[0]) {
896
+ TORCH_CHECK(
897
+ false,
898
+ "Expected input batch_size (",
899
+ input.sizes()[0],
900
+ ") to match target batch_size (",
901
+ target.sizes()[0],
902
+ ").");
903
+ }
904
+
905
+ return torch::nll_loss_nd(
906
+ input,
907
+ target,
908
+ weight,
909
+ enumtype::reduction_get_enum(reduction),
910
+ ignore_index);
911
+ }
912
+ } // namespace detail
913
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
914
+
915
+ /// See
916
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.nll_loss
917
+ /// about the exact behavior of this functional.
918
+ ///
919
+ /// See the documentation for `torch::nn::functional::NLLLossFuncOptions` class
920
+ /// to learn what optional arguments are supported for this functional.
921
+ ///
922
+ /// Example:
923
+ /// ```
924
+ /// namespace F = torch::nn::functional;
925
+ /// F::nll_loss(input, target,
926
+ /// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
927
+ /// ```
928
+ inline Tensor nll_loss(
929
+ const Tensor& input,
930
+ const Tensor& target,
931
+ const NLLLossFuncOptions& options = {}) {
932
+ return detail::nll_loss(
933
+ input,
934
+ target,
935
+ options.weight(),
936
+ options.ignore_index(),
937
+ options.reduction());
938
+ }
939
+
940
+ // ============================================================================
941
+
942
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
943
+ namespace detail {
944
+ inline Tensor cross_entropy(
945
+ const Tensor& input,
946
+ const Tensor& target,
947
+ const Tensor& weight,
948
+ int64_t ignore_index,
949
+ CrossEntropyFuncOptions::reduction_t reduction,
950
+ double label_smoothing) {
951
+ return torch::cross_entropy_loss(
952
+ input,
953
+ target,
954
+ weight,
955
+ enumtype::reduction_get_enum(reduction),
956
+ ignore_index,
957
+ label_smoothing);
958
+ }
959
+ } // namespace detail
960
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
961
+
962
+ /// See
963
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cross_entropy
964
+ /// about the exact behavior of this functional.
965
+ ///
966
+ /// See the documentation for `torch::nn::functional::CrossEntropyFuncOptions`
967
+ /// class to learn what optional arguments are supported for this functional.
968
+ ///
969
+ /// Example:
970
+ /// ```
971
+ /// namespace F = torch::nn::functional;
972
+ /// F::cross_entropy(input, target,
973
+ /// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
974
+ /// ```
975
+ inline Tensor cross_entropy(
976
+ const Tensor& input,
977
+ const Tensor& target,
978
+ const CrossEntropyFuncOptions& options = {}) {
979
+ return detail::cross_entropy(
980
+ input,
981
+ target,
982
+ options.weight(),
983
+ options.ignore_index(),
984
+ options.reduction(),
985
+ options.label_smoothing());
986
+ }
987
+
988
+ // ============================================================================
989
+
990
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
991
+ namespace detail {
992
+ inline Tensor binary_cross_entropy_with_logits(
993
+ const Tensor& input,
994
+ const Tensor& target,
995
+ const Tensor& weight,
996
+ BinaryCrossEntropyWithLogitsFuncOptions::reduction_t reduction,
997
+ const Tensor& pos_weight) {
998
+ TORCH_CHECK(
999
+ target.sizes() == input.sizes(),
1000
+ "Target size (",
1001
+ target.sizes(),
1002
+ ") must be the same as input size (",
1003
+ input.sizes(),
1004
+ ")");
1005
+
1006
+ return torch::binary_cross_entropy_with_logits(
1007
+ input,
1008
+ target,
1009
+ weight,
1010
+ pos_weight,
1011
+ enumtype::reduction_get_enum(reduction));
1012
+ }
1013
+ } // namespace detail
1014
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
1015
+
1016
+ /// See
1017
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy_with_logits
1018
+ /// about the exact behavior of this functional.
1019
+ ///
1020
+ /// See the documentation for
1021
+ /// `torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions` class to
1022
+ /// learn what optional arguments are supported for this functional.
1023
+ ///
1024
+ /// Example:
1025
+ /// ```
1026
+ /// namespace F = torch::nn::functional;
1027
+ /// F::binary_cross_entropy_with_logits(input, target,
1028
+ /// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
1029
+ /// ```
1030
+ inline Tensor binary_cross_entropy_with_logits(
1031
+ const Tensor& input,
1032
+ const Tensor& target,
1033
+ const BinaryCrossEntropyWithLogitsFuncOptions& options = {}) {
1034
+ return detail::binary_cross_entropy_with_logits(
1035
+ input,
1036
+ target,
1037
+ options.weight(),
1038
+ options.reduction(),
1039
+ options.pos_weight());
1040
+ }
1041
+
1042
+ } // namespace functional
1043
+ } // namespace nn
1044
+ } // namespace torch
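For quick orientation, here is a minimal, self-contained usage sketch of the `cross_entropy` functional defined above. The tensor shapes and the class count are illustrative assumptions, not values taken from the header:

```
#include <torch/torch.h>

namespace F = torch::nn::functional;

int main() {
  // Assumed shapes: a batch of 4 logit rows over 10 classes,
  // with integer class-index targets.
  auto logits = torch::randn({4, 10});
  auto target = torch::randint(0, 10, {4}, torch::kLong);
  auto loss = F::cross_entropy(
      logits, target, F::CrossEntropyFuncOptions().reduction(torch::kMean));
  return 0;
}
```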
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h ADDED
@@ -0,0 +1,211 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/functional/padding.h>
4
+ #include <torch/nn/functional/pooling.h>
5
+ #include <torch/nn/options/normalization.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+ namespace functional {
11
+
12
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
13
+ namespace detail {
14
+ inline Tensor normalize(
15
+ const Tensor& input,
16
+ double p,
17
+ int64_t dim,
18
+ double eps,
19
+ c10::optional<Tensor> out) {
20
+ if (out == c10::nullopt) {
21
+ auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input);
22
+ return input / denom;
23
+ } else {
24
+ auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input);
25
+ return torch::div_out(*out, input, denom);
26
+ }
27
+ }
28
+ } // namespace detail
29
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
30
+
31
+ /// See
32
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.normalize
33
+ /// about the exact behavior of this functional.
34
+ ///
35
+ /// See the documentation for `torch::nn::functional::NormalizeFuncOptions`
36
+ /// class to learn what optional arguments are supported for this functional.
37
+ ///
38
+ /// Example:
39
+ /// ```
40
+ /// namespace F = torch::nn::functional;
41
+ /// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1));
42
+ /// ```
43
+ inline Tensor normalize(
44
+ const Tensor& input,
45
+ NormalizeFuncOptions options = {}) {
46
+ return detail::normalize(
47
+ input, options.p(), options.dim(), options.eps(), options.out());
48
+ }
49
+
50
+ // ============================================================================
51
+
52
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
53
+ namespace detail {
54
+ inline Tensor layer_norm(
55
+ const Tensor& input,
56
+ const std::vector<int64_t>& normalized_shape,
57
+ const Tensor& weight,
58
+ const Tensor& bias,
59
+ double eps) {
60
+ return torch::layer_norm(input, normalized_shape, weight, bias, eps);
61
+ }
62
+ } // namespace detail
63
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
64
+
65
+ /// See
66
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.layer_norm
67
+ /// about the exact behavior of this functional.
68
+ ///
69
+ /// See the documentation for `torch::nn::functional::LayerNormFuncOptions`
70
+ /// class to learn what optional arguments are supported for this functional.
71
+ ///
72
+ /// Example:
73
+ /// ```
74
+ /// namespace F = torch::nn::functional;
75
+ /// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5));
76
+ /// ```
77
+ inline Tensor layer_norm(
78
+ const Tensor& input,
79
+ const LayerNormFuncOptions& options) {
80
+ return detail::layer_norm(
81
+ input,
82
+ options.normalized_shape(),
83
+ options.weight(),
84
+ options.bias(),
85
+ options.eps());
86
+ }
87
+
88
+ // ============================================================================
89
+
90
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
91
+ namespace detail {
92
+ inline Tensor local_response_norm(
93
+ const Tensor& input,
94
+ int64_t size,
95
+ double alpha,
96
+ double beta,
97
+ double k) {
98
+ auto dim = input.dim();
99
+ TORCH_CHECK(
100
+ dim >= 3,
101
+ "Expected 3D or higher dimensionality input (got ",
102
+ dim,
103
+ " dimensions)");
104
+ auto div = input.mul(input).unsqueeze(1);
105
+ if (dim == 3) {
106
+ div = detail::pad(
107
+ div,
108
+ /*pad=*/{0, 0, size / 2, (size - 1) / 2},
109
+ /*mode=*/torch::kConstant,
110
+ /*value=*/0);
111
+ div = detail::avg_pool2d(
112
+ div,
113
+ /*kernel_size=*/{size, 1},
114
+ /*stride=*/1,
115
+ /*padding=*/0,
116
+ /*ceil_mode=*/false,
117
+ /*count_include_pad=*/true,
118
+ /*divisor_override=*/c10::nullopt)
119
+ .squeeze(1);
120
+ } else {
121
+ auto sizes = input.sizes();
122
+ div = div.view({sizes[0], 1, sizes[1], sizes[2], -1});
123
+ div = detail::pad(
124
+ div,
125
+ /*pad=*/{0, 0, 0, 0, size / 2, (size - 1) / 2},
126
+ /*mode=*/torch::kConstant,
127
+ /*value=*/0);
128
+ div = detail::avg_pool3d(
129
+ div,
130
+ /*kernel_size=*/{size, 1, 1},
131
+ /*stride=*/1,
132
+ /*padding=*/0,
133
+ /*ceil_mode=*/false,
134
+ /*count_include_pad=*/true,
135
+ /*divisor_override=*/c10::nullopt)
136
+ .squeeze(1);
137
+ div = div.view(sizes);
138
+ }
139
+ div = div.mul(alpha).add(k).pow(beta);
140
+ return input / div;
141
+ }
142
+ } // namespace detail
143
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
144
+
145
+ /// See
146
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.local_response_norm
147
+ /// about the exact behavior of this functional.
148
+ ///
149
+ /// See the documentation for
150
+ /// `torch::nn::functional::LocalResponseNormFuncOptions` class to learn what
151
+ /// optional arguments are supported for this functional.
152
+ ///
153
+ /// Example:
154
+ /// ```
155
+ /// namespace F = torch::nn::functional;
156
+ /// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
157
+ /// ```
158
+ inline Tensor local_response_norm(
159
+ const Tensor& input,
160
+ const LocalResponseNormFuncOptions& options) {
161
+ return detail::local_response_norm(
162
+ input, options.size(), options.alpha(), options.beta(), options.k());
163
+ }
164
+
165
+ // ============================================================================
166
+
167
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
168
+ namespace detail {
169
+ inline Tensor group_norm(
170
+ const Tensor& input,
171
+ int64_t num_groups,
172
+ const Tensor& weight,
173
+ const Tensor& bias,
174
+ double eps) {
175
+ return torch::group_norm(
176
+ input,
177
+ num_groups,
178
+ weight,
179
+ bias,
180
+ eps,
181
+ at::globalContext().userEnabledCuDNN());
182
+ }
183
+ } // namespace detail
184
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
185
+
186
+ /// See
187
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.group_norm
188
+ /// about the exact behavior of this functional.
189
+ ///
190
+ /// See the documentation for `torch::nn::functional::GroupNormFuncOptions`
191
+ /// class to learn what optional arguments are supported for this functional.
192
+ ///
193
+ /// Example:
194
+ /// ```
195
+ /// namespace F = torch::nn::functional;
196
+ /// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5));
197
+ /// ```
198
+ inline Tensor group_norm(
199
+ const Tensor& input,
200
+ const GroupNormFuncOptions& options) {
201
+ return detail::group_norm(
202
+ input,
203
+ options.num_groups(),
204
+ options.weight(),
205
+ options.bias(),
206
+ options.eps());
207
+ }
208
+
209
+ } // namespace functional
210
+ } // namespace nn
211
+ } // namespace torch
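As a brief aside, a minimal sketch showing how the normalization functionals declared in this header are typically called; the shapes and option values below are assumptions chosen for illustration:

```
#include <torch/torch.h>

namespace F = torch::nn::functional;

int main() {
  auto x = torch::randn({4, 8, 16});
  // L2-normalize along the last dimension.
  auto n = F::normalize(x, F::NormalizeFuncOptions().p(2).dim(-1));
  // Layer norm over the trailing {8, 16} shape.
  auto l = F::layer_norm(x, F::LayerNormFuncOptions({8, 16}).eps(1e-5));
  // Group norm: the channel dimension (6) must be divisible by num_groups (3).
  auto g = F::group_norm(
      torch::randn({2, 6, 5, 5}), F::GroupNormFuncOptions(3).eps(1e-5));
  return 0;
}
```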
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/padding.h ADDED
@@ -0,0 +1,58 @@
1
+ #pragma once
2
+
3
+ #include <ATen/PadNd.h>
4
+ #include <torch/nn/options/padding.h>
5
+
6
+ namespace torch {
7
+ namespace nn {
8
+ namespace functional {
9
+
10
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
11
+ namespace detail {
12
+ inline Tensor pad(
13
+ const Tensor& input,
14
+ IntArrayRef pad,
15
+ PadFuncOptions::mode_t mode,
16
+ double value) {
17
+ const auto mode_enum = [&] {
18
+ if (std::holds_alternative<enumtype::kConstant>(mode)) {
19
+ return at::padding_mode::constant;
20
+ } else if (std::holds_alternative<enumtype::kReflect>(mode)) {
21
+ return at::padding_mode::reflect;
22
+ } else if (std::holds_alternative<enumtype::kReplicate>(mode)) {
23
+ return at::padding_mode::replicate;
24
+ } else if (std::holds_alternative<enumtype::kCircular>(mode)) {
25
+ return at::padding_mode::circular;
26
+ }
27
+ TORCH_CHECK(false, "Unrecognised padding mode");
28
+ }();
29
+
30
+ c10::optional<double> fill_value;
31
+ if (value != 0.0) {
32
+ fill_value = value;
33
+ }
34
+ return at::_pad_enum(input, pad, static_cast<int64_t>(mode_enum), fill_value);
35
+ }
36
+ } // namespace detail
37
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
38
+
39
+ /// See
40
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pad
41
+ /// about the exact behavior of this functional.
42
+ ///
43
+ /// See the documentation for `torch::nn::functional::PadFuncOptions` class to
44
+ /// learn what optional arguments are supported for this functional.
45
+ ///
46
+ /// Example:
47
+ /// ```
48
+ /// namespace F = torch::nn::functional;
49
+ /// F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1,
50
+ /// 2}).mode(torch::kReplicate));
51
+ /// ```
52
+ inline Tensor pad(const Tensor& input, const PadFuncOptions& options) {
53
+ return detail::pad(input, options.pad(), options.mode(), options.value());
54
+ }
55
+
56
+ } // namespace functional
57
+ } // namespace nn
58
+ } // namespace torch
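One point worth calling out from the implementation above: the pad list is consumed from the last dimension inward, in left/right pairs per dimension. A small sketch under assumed shapes:

```
#include <torch/torch.h>

namespace F = torch::nn::functional;

int main() {
  // Assumed 5-D input: N, C, D, H, W.
  auto x = torch::ones({1, 1, 2, 3, 4});
  // {1, 2, 2, 1, 1, 2} pads W by (1, 2), H by (2, 1), D by (1, 2).
  auto y = F::pad(
      x, F::PadFuncOptions({1, 2, 2, 1, 1, 2}).mode(torch::kConstant));
  TORCH_CHECK(y.sizes() == torch::IntArrayRef({1, 1, 5, 6, 7}));
  return 0;
}
```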
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/options/pixelshuffle.h>
4
+
5
+ namespace torch {
6
+ namespace nn {
7
+ namespace functional {
8
+
9
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
10
+ namespace detail {
11
+ inline Tensor pixel_shuffle(const Tensor& input, int64_t upscale_factor) {
12
+ return torch::pixel_shuffle(input, upscale_factor);
13
+ }
14
+
15
+ inline Tensor pixel_unshuffle(const Tensor& input, int64_t downscale_factor) {
16
+ return torch::pixel_unshuffle(input, downscale_factor);
17
+ }
18
+ } // namespace detail
19
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
20
+
21
+ /// See
22
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pixel_shuffle
23
+ /// about the exact behavior of this functional.
24
+ ///
25
+ /// See the documentation for `torch::nn::functional::PixelShuffleFuncOptions`
26
+ /// class to learn what optional arguments are supported for this functional.
27
+ ///
28
+ /// Example:
29
+ /// ```
30
+ /// namespace F = torch::nn::functional;
31
+ /// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
32
+ /// ```
33
+ inline Tensor pixel_shuffle(
34
+ const Tensor& input,
35
+ const PixelShuffleFuncOptions& options) {
36
+ return detail::pixel_shuffle(input, options.upscale_factor());
37
+ }
38
+
39
+ inline Tensor pixel_unshuffle(
40
+ const Tensor& input,
41
+ const PixelUnshuffleFuncOptions& options) {
42
+ return detail::pixel_unshuffle(input, options.downscale_factor());
43
+ }
44
+
45
+ } // namespace functional
46
+ } // namespace nn
47
+ } // namespace torch
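A short round-trip sketch for the two functionals above; the channel count below (9 = 3 x 3) is an assumption chosen so the upscale factor divides it evenly:

```
#include <torch/torch.h>

namespace F = torch::nn::functional;

int main() {
  auto x = torch::randn({1, 9, 4, 4});
  // pixel_shuffle folds 3x3 channel blocks into space: 1x9x4x4 -> 1x1x12x12.
  auto up = F::pixel_shuffle(x, F::PixelShuffleFuncOptions(3));
  // pixel_unshuffle reverses the rearrangement.
  auto down = F::pixel_unshuffle(up, F::PixelUnshuffleFuncOptions(3));
  TORCH_CHECK(down.sizes() == x.sizes());
  return 0;
}
```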
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h ADDED
@@ -0,0 +1,1153 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/nn/functional/activation.h>
5
+ #include <torch/nn/modules/utils.h>
6
+ #include <torch/nn/options/pooling.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+ namespace functional {
11
+
12
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
13
+ namespace detail {
14
+ inline Tensor avg_pool1d(
15
+ const Tensor& input,
16
+ ExpandingArray<1> kernel_size,
17
+ ExpandingArray<1> stride,
18
+ ExpandingArray<1> padding,
19
+ bool ceil_mode,
20
+ bool count_include_pad) {
21
+ return torch::avg_pool1d(
22
+ input, kernel_size, stride, padding, ceil_mode, count_include_pad);
23
+ }
24
+ } // namespace detail
25
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
26
+
27
+ /// See
28
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool1d
29
+ /// about the exact behavior of this functional.
30
+ ///
31
+ /// See the documentation for `torch::nn::functional::AvgPool1dFuncOptions`
32
+ /// class to learn what optional arguments are supported for this functional.
33
+ ///
34
+ /// Example:
35
+ /// ```
36
+ /// namespace F = torch::nn::functional;
37
+ /// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
38
+ /// ```
39
+ inline Tensor avg_pool1d(
40
+ const Tensor& input,
41
+ const AvgPool1dFuncOptions& options) {
42
+ return avg_pool1d(
43
+ input,
44
+ options.kernel_size(),
45
+ options.stride(),
46
+ options.padding(),
47
+ options.ceil_mode(),
48
+ options.count_include_pad());
49
+ }
50
+
51
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
52
+ namespace detail {
53
+ inline Tensor avg_pool2d(
54
+ const Tensor& input,
55
+ ExpandingArray<2> kernel_size,
56
+ ExpandingArray<2> stride,
57
+ ExpandingArray<2> padding,
58
+ bool ceil_mode,
59
+ bool count_include_pad,
60
+ c10::optional<int64_t> divisor_override) {
61
+ return torch::avg_pool2d(
62
+ input,
63
+ kernel_size,
64
+ stride,
65
+ padding,
66
+ ceil_mode,
67
+ count_include_pad,
68
+ divisor_override);
69
+ }
70
+ } // namespace detail
71
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
72
+
73
+ /// See
74
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool2d
75
+ /// about the exact behavior of this functional.
76
+ ///
77
+ /// See the documentation for `torch::nn::functional::AvgPool2dFuncOptions`
78
+ /// class to learn what optional arguments are supported for this functional.
79
+ ///
80
+ /// Example:
81
+ /// ```
82
+ /// namespace F = torch::nn::functional;
83
+ /// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
84
+ /// ```
85
+ inline Tensor avg_pool2d(
86
+ const Tensor& input,
87
+ const AvgPool2dFuncOptions& options) {
88
+ return detail::avg_pool2d(
89
+ input,
90
+ options.kernel_size(),
91
+ options.stride(),
92
+ options.padding(),
93
+ options.ceil_mode(),
94
+ options.count_include_pad(),
95
+ options.divisor_override());
96
+ }
97
+
98
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
99
+ namespace detail {
100
+ inline Tensor avg_pool3d(
101
+ const Tensor& input,
102
+ ExpandingArray<3> kernel_size,
103
+ ExpandingArray<3> stride,
104
+ ExpandingArray<3> padding,
105
+ bool ceil_mode,
106
+ bool count_include_pad,
107
+ c10::optional<int64_t> divisor_override) {
108
+ return torch::avg_pool3d(
109
+ input,
110
+ kernel_size,
111
+ stride,
112
+ padding,
113
+ ceil_mode,
114
+ count_include_pad,
115
+ divisor_override);
116
+ }
117
+ } // namespace detail
118
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
119
+
120
+ /// See
121
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool3d
122
+ /// about the exact behavior of this functional.
123
+ ///
124
+ /// See the documentation for `torch::nn::functional::AvgPool3dFuncOptions`
125
+ /// class to learn what optional arguments are supported for this functional.
126
+ ///
127
+ /// Example:
128
+ /// ```
129
+ /// namespace F = torch::nn::functional;
130
+ /// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
131
+ /// ```
132
+ inline Tensor avg_pool3d(
133
+ const Tensor& input,
134
+ const AvgPool3dFuncOptions& options) {
135
+ return detail::avg_pool3d(
136
+ input,
137
+ options.kernel_size(),
138
+ options.stride(),
139
+ options.padding(),
140
+ options.ceil_mode(),
141
+ options.count_include_pad(),
142
+ options.divisor_override());
143
+ }
144
+
145
+ // ============================================================================
146
+
147
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
148
+ namespace detail {
149
+ inline Tensor max_pool1d(
150
+ const Tensor& input,
151
+ ExpandingArray<1> kernel_size,
152
+ ExpandingArray<1> stride,
153
+ ExpandingArray<1> padding,
154
+ ExpandingArray<1> dilation,
155
+ bool ceil_mode) {
156
+ return torch::max_pool1d(
157
+ input, kernel_size, stride, padding, dilation, ceil_mode);
158
+ }
159
+ } // namespace detail
160
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
161
+
162
+ /// See
163
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool1d
164
+ /// about the exact behavior of this functional.
165
+ ///
166
+ /// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions`
167
+ /// class to learn what optional arguments are supported for this functional.
168
+ ///
169
+ /// Example:
170
+ /// ```
171
+ /// namespace F = torch::nn::functional;
172
+ /// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
173
+ /// ```
174
+ inline Tensor max_pool1d(
175
+ const Tensor& input,
176
+ const MaxPool1dFuncOptions& options) {
177
+ return detail::max_pool1d(
178
+ input,
179
+ options.kernel_size(),
180
+ options.stride(),
181
+ options.padding(),
182
+ options.dilation(),
183
+ options.ceil_mode());
184
+ }
185
+
186
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
187
+ namespace detail {
188
+ inline std::tuple<Tensor, Tensor> max_pool1d_with_indices(
189
+ const Tensor& input,
190
+ ExpandingArray<1> kernel_size,
191
+ ExpandingArray<1> stride,
192
+ ExpandingArray<1> padding,
193
+ ExpandingArray<1> dilation,
194
+ bool ceil_mode) {
195
+ return torch::max_pool1d_with_indices(
196
+ input, kernel_size, stride, padding, dilation, ceil_mode);
197
+ }
198
+ } // namespace detail
199
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
200
+
201
+ /// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions`
202
+ /// class to learn what optional arguments are supported for this functional.
203
+ ///
204
+ /// Example:
205
+ /// ```
206
+ /// namespace F = torch::nn::functional;
207
+ /// F::max_pool1d_with_indices(x, F::MaxPool1dFuncOptions(3).stride(2));
208
+ /// ```
209
+ inline std::tuple<Tensor, Tensor> max_pool1d_with_indices(
210
+ const Tensor& input,
211
+ const MaxPool1dFuncOptions& options) {
212
+ return detail::max_pool1d_with_indices(
213
+ input,
214
+ options.kernel_size(),
215
+ options.stride(),
216
+ options.padding(),
217
+ options.dilation(),
218
+ options.ceil_mode());
219
+ }
220
+
221
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
222
+ namespace detail {
223
+ inline Tensor max_pool2d(
224
+ const Tensor& input,
225
+ ExpandingArray<2> kernel_size,
226
+ ExpandingArray<2> stride,
227
+ ExpandingArray<2> padding,
228
+ ExpandingArray<2> dilation,
229
+ bool ceil_mode) {
230
+ return torch::max_pool2d(
231
+ input, kernel_size, stride, padding, dilation, ceil_mode);
232
+ }
233
+ } // namespace detail
234
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
235
+
236
+ /// See
237
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool2d
238
+ /// about the exact behavior of this functional.
239
+ ///
240
+ /// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions`
241
+ /// class to learn what optional arguments are supported for this functional.
242
+ ///
243
+ /// Example:
244
+ /// ```
245
+ /// namespace F = torch::nn::functional;
246
+ /// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
247
+ /// ```
248
+ inline Tensor max_pool2d(
249
+ const Tensor& input,
250
+ const MaxPool2dFuncOptions& options) {
251
+ return detail::max_pool2d(
252
+ input,
253
+ options.kernel_size(),
254
+ options.stride(),
255
+ options.padding(),
256
+ options.dilation(),
257
+ options.ceil_mode());
258
+ }
259
+
260
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
261
+ namespace detail {
262
+ inline std::tuple<Tensor, Tensor> max_pool2d_with_indices(
263
+ const Tensor& input,
264
+ ExpandingArray<2> kernel_size,
265
+ ExpandingArray<2> stride,
266
+ ExpandingArray<2> padding,
267
+ ExpandingArray<2> dilation,
268
+ bool ceil_mode) {
269
+ return torch::max_pool2d_with_indices(
270
+ input, kernel_size, stride, padding, dilation, ceil_mode);
271
+ }
272
+ } // namespace detail
273
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
274
+
275
+ /// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions`
276
+ /// class to learn what optional arguments are supported for this functional.
277
+ ///
278
+ /// Example:
279
+ /// ```
280
+ /// namespace F = torch::nn::functional;
281
+ /// F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(3).stride(2));
282
+ /// ```
283
+ inline std::tuple<Tensor, Tensor> max_pool2d_with_indices(
284
+ const Tensor& input,
285
+ const MaxPool2dFuncOptions& options) {
286
+ return detail::max_pool2d_with_indices(
287
+ input,
288
+ options.kernel_size(),
289
+ options.stride(),
290
+ options.padding(),
291
+ options.dilation(),
292
+ options.ceil_mode());
293
+ }
294
+
295
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
296
+ namespace detail {
297
+ inline Tensor max_pool3d(
298
+ const Tensor& input,
299
+ ExpandingArray<3> kernel_size,
300
+ ExpandingArray<3> stride,
301
+ ExpandingArray<3> padding,
302
+ ExpandingArray<3> dilation,
303
+ bool ceil_mode) {
304
+ return torch::max_pool3d(
305
+ input, kernel_size, stride, padding, dilation, ceil_mode);
306
+ }
307
+ } // namespace detail
308
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
309
+
310
+ /// See
311
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool3d
312
+ /// about the exact behavior of this functional.
313
+ ///
314
+ /// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions`
315
+ /// class to learn what optional arguments are supported for this functional.
316
+ ///
317
+ /// Example:
318
+ /// ```
319
+ /// namespace F = torch::nn::functional;
320
+ /// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
321
+ /// ```
322
+ inline Tensor max_pool3d(
323
+ const Tensor& input,
324
+ const MaxPool3dFuncOptions& options) {
325
+ return detail::max_pool3d(
326
+ input,
327
+ options.kernel_size(),
328
+ options.stride(),
329
+ options.padding(),
330
+ options.dilation(),
331
+ options.ceil_mode());
332
+ }
333
+
334
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
335
+ namespace detail {
336
+ inline std::tuple<Tensor, Tensor> max_pool3d_with_indices(
337
+ const Tensor& input,
338
+ ExpandingArray<3> kernel_size,
339
+ ExpandingArray<3> stride,
340
+ ExpandingArray<3> padding,
341
+ ExpandingArray<3> dilation,
342
+ bool ceil_mode) {
343
+ return torch::max_pool3d_with_indices(
344
+ input, kernel_size, stride, padding, dilation, ceil_mode);
345
+ }
346
+ } // namespace detail
347
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
348
+
349
+ /// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions`
350
+ /// class to learn what optional arguments are supported for this functional.
351
+ ///
352
+ /// Example:
353
+ /// ```
354
+ /// namespace F = torch::nn::functional;
355
+ /// F::max_pool3d_with_indices(x, F::MaxPool3dFuncOptions(3).stride(2));
356
+ /// ```
357
+ inline std::tuple<Tensor, Tensor> max_pool3d_with_indices(
358
+ const Tensor& input,
359
+ const MaxPool3dFuncOptions& options) {
360
+ return detail::max_pool3d_with_indices(
361
+ input,
362
+ options.kernel_size(),
363
+ options.stride(),
364
+ options.padding(),
365
+ options.dilation(),
366
+ options.ceil_mode());
367
+ }
368
+
369
+ // ============================================================================
370
+
371
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
372
+ namespace detail {
373
+ inline std::tuple<Tensor, Tensor> adaptive_max_pool1d_with_indices(
374
+ const Tensor& input,
375
+ ExpandingArray<1> output_size) {
376
+ return torch::adaptive_max_pool1d(input, output_size);
377
+ }
378
+ } // namespace detail
379
+
380
+ /// See the documentation for
381
+ /// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what
382
+ /// optional arguments are supported for this functional.
383
+ ///
384
+ /// Example:
385
+ /// ```
386
+ /// namespace F = torch::nn::functional;
387
+ /// F::adaptive_max_pool1d_with_indices(x, F::AdaptiveMaxPool1dFuncOptions(3));
388
+ /// ```
389
+ inline std::tuple<Tensor, Tensor> adaptive_max_pool1d_with_indices(
390
+ const Tensor& input,
391
+ const AdaptiveMaxPool1dFuncOptions& options) {
392
+ return detail::adaptive_max_pool1d_with_indices(input, options.output_size());
393
+ }
394
+
395
+ namespace detail {
396
+ inline Tensor adaptive_max_pool1d(
397
+ const Tensor& input,
398
+ ExpandingArray<1> output_size) {
399
+ return std::get<0>(adaptive_max_pool1d_with_indices(input, output_size));
400
+ }
401
+ } // namespace detail
402
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
403
+
404
+ /// See
405
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool1d
406
+ /// about the exact behavior of this functional.
407
+ ///
408
+ /// See the documentation for
409
+ /// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what
410
+ /// optional arguments are supported for this functional.
411
+ ///
412
+ /// Example:
413
+ /// ```
414
+ /// namespace F = torch::nn::functional;
415
+ /// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
416
+ /// ```
417
+ inline Tensor adaptive_max_pool1d(
418
+ const Tensor& input,
419
+ const AdaptiveMaxPool1dFuncOptions& options) {
420
+ return detail::adaptive_max_pool1d(input, options.output_size());
421
+ }
422
+
423
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
424
+ namespace detail {
425
+ inline std::tuple<Tensor, Tensor> adaptive_max_pool2d_with_indices(
426
+ const Tensor& input,
427
+ ExpandingArrayWithOptionalElem<2> output_size) {
428
+ auto output_size_ =
429
+ torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
430
+ return torch::adaptive_max_pool2d(input, output_size_);
431
+ }
432
+ } // namespace detail
433
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
434
+
435
+ /// See the documentation for
436
+ /// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what
437
+ /// optional arguments are supported for this functional.
438
+ ///
439
+ /// Example:
440
+ /// ```
441
+ /// namespace F = torch::nn::functional;
442
+ /// F::adaptive_max_pool2d_with_indices(x, F::AdaptiveMaxPool2dFuncOptions(3));
443
+ /// ```
444
+ inline std::tuple<Tensor, Tensor> adaptive_max_pool2d_with_indices(
445
+ const Tensor& input,
446
+ const AdaptiveMaxPool2dFuncOptions& options) {
447
+ return detail::adaptive_max_pool2d_with_indices(input, options.output_size());
448
+ }
449
+
450
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
451
+ namespace detail {
452
+ inline Tensor adaptive_max_pool2d(
453
+ const Tensor& input,
454
+ ExpandingArrayWithOptionalElem<2> output_size) {
455
+ return std::get<0>(adaptive_max_pool2d_with_indices(input, output_size));
456
+ }
457
+ } // namespace detail
458
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
459
+
460
+ /// See
461
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool2d
462
+ /// about the exact behavior of this functional.
463
+ ///
464
+ /// See the documentation for
465
+ /// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what
466
+ /// optional arguments are supported for this functional.
467
+ ///
468
+ /// Example:
469
+ /// ```
470
+ /// namespace F = torch::nn::functional;
471
+ /// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
472
+ /// ```
473
+ inline Tensor adaptive_max_pool2d(
474
+ const Tensor& input,
475
+ const AdaptiveMaxPool2dFuncOptions& options) {
476
+ return detail::adaptive_max_pool2d(input, options.output_size());
477
+ }
478
+
479
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
480
+ namespace detail {
481
+ inline std::tuple<Tensor, Tensor> adaptive_max_pool3d_with_indices(
482
+ const Tensor& input,
483
+ ExpandingArrayWithOptionalElem<3> output_size) {
484
+ auto output_size_ =
485
+ torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
486
+ return torch::adaptive_max_pool3d(input, output_size_);
487
+ }
488
+ } // namespace detail
489
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
490
+
491
+ /// See the documentation for
492
+ /// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what
493
+ /// optional arguments are supported for this functional.
494
+ ///
495
+ /// Example:
496
+ /// ```
497
+ /// namespace F = torch::nn::functional;
498
+ /// F::adaptive_max_pool3d_with_indices(x, F::AdaptiveMaxPool3dFuncOptions(3));
499
+ /// ```
500
+ inline std::tuple<Tensor, Tensor> adaptive_max_pool3d_with_indices(
501
+ const Tensor& input,
502
+ const AdaptiveMaxPool3dFuncOptions& options) {
503
+ return detail::adaptive_max_pool3d_with_indices(input, options.output_size());
504
+ }
505
+
506
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
507
+ namespace detail {
508
+ inline Tensor adaptive_max_pool3d(
509
+ const Tensor& input,
510
+ ExpandingArrayWithOptionalElem<3> output_size) {
511
+ return std::get<0>(adaptive_max_pool3d_with_indices(input, output_size));
512
+ }
513
+ } // namespace detail
514
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
515
+
516
+ /// See
517
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool3d
518
+ /// about the exact behavior of this functional.
519
+ ///
520
+ /// See the documentation for
521
+ /// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what
522
+ /// optional arguments are supported for this functional.
523
+ ///
524
+ /// Example:
525
+ /// ```
526
+ /// namespace F = torch::nn::functional;
527
+ /// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
528
+ /// ```
529
+ inline Tensor adaptive_max_pool3d(
530
+ const Tensor& input,
531
+ const AdaptiveMaxPool3dFuncOptions& options) {
532
+ return detail::adaptive_max_pool3d(input, options.output_size());
533
+ }
534
+
535
+ // ============================================================================
536
+
537
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
538
+ namespace detail {
539
+ inline Tensor adaptive_avg_pool1d(
540
+ const Tensor& input,
541
+ ExpandingArray<1> output_size) {
542
+ return torch::adaptive_avg_pool1d(input, output_size);
543
+ }
544
+ } // namespace detail
545
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
546
+
547
+ /// See
548
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool1d
549
+ /// about the exact behavior of this functional.
550
+ ///
551
+ /// See the documentation for
552
+ /// `torch::nn::functional::AdaptiveAvgPool1dFuncOptions` class to learn what
553
+ /// optional arguments are supported for this functional.
554
+ ///
555
+ /// Example:
556
+ /// ```
557
+ /// namespace F = torch::nn::functional;
558
+ /// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
559
+ /// ```
560
+ inline Tensor adaptive_avg_pool1d(
561
+ const Tensor& input,
562
+ const AdaptiveAvgPool1dFuncOptions& options) {
563
+ return detail::adaptive_avg_pool1d(input, options.output_size());
564
+ }
565
+
566
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
567
+ namespace detail {
568
+ inline Tensor adaptive_avg_pool2d(
569
+ const Tensor& input,
570
+ ExpandingArrayWithOptionalElem<2> output_size) {
571
+ auto output_size_ =
572
+ torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
573
+ return torch::adaptive_avg_pool2d(input, output_size_);
574
+ }
575
+ } // namespace detail
576
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
577
+
578
+ /// See
579
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool2d
580
+ /// about the exact behavior of this functional.
581
+ ///
582
+ /// See the documentation for
583
+ /// `torch::nn::functional::AdaptiveAvgPool2dFuncOptions` class to learn what
584
+ /// optional arguments are supported for this functional.
585
+ ///
586
+ /// Example:
587
+ /// ```
588
+ /// namespace F = torch::nn::functional;
589
+ /// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
590
+ /// ```
591
+ inline Tensor adaptive_avg_pool2d(
592
+ const Tensor& input,
593
+ const AdaptiveAvgPool2dFuncOptions& options) {
594
+ return detail::adaptive_avg_pool2d(input, options.output_size());
595
+ }
596
+
597
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
598
+ namespace detail {
599
+ inline Tensor adaptive_avg_pool3d(
600
+ const Tensor& input,
601
+ ExpandingArrayWithOptionalElem<3> output_size) {
602
+ auto output_size_ =
603
+ torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
604
+ return torch::adaptive_avg_pool3d(input, output_size_);
605
+ }
606
+ } // namespace detail
607
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
608
+
609
+ /// See
610
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool3d
611
+ /// about the exact behavior of this functional.
612
+ ///
613
+ /// See the documentation for
614
+ /// `torch::nn::functional::AdaptiveAvgPool3dFuncOptions` class to learn what
615
+ /// optional arguments are supported for this functional.
616
+ ///
617
+ /// Example:
618
+ /// ```
619
+ /// namespace F = torch::nn::functional;
620
+ /// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
621
+ /// ```
622
+ inline Tensor adaptive_avg_pool3d(
623
+ const Tensor& input,
624
+ const AdaptiveAvgPool3dFuncOptions& options) {
625
+ return detail::adaptive_avg_pool3d(input, options.output_size());
626
+ }
627
+
628
+ // ============================================================================
629
+
630
+ inline std::vector<int64_t> _unpool_output_size(
631
+ const Tensor& input,
632
+ const IntArrayRef& kernel_size,
633
+ const IntArrayRef& stride,
634
+ const IntArrayRef& padding,
635
+ const c10::optional<std::vector<int64_t>>& output_size) {
636
+ auto input_size = input.sizes();
637
+ std::vector<int64_t> default_size;
638
+ for (const auto d : c10::irange(kernel_size.size())) {
639
+ default_size.push_back(
640
+ (input_size[input_size.size() - kernel_size.size() + d] - 1) *
641
+ stride[d] +
642
+ kernel_size[d] - 2 * padding[d]);
643
+ }
644
+ if (!output_size) {
645
+ return default_size;
646
+ } else {
647
+ std::vector<int64_t> output_size_;
648
+ if (output_size->size() == kernel_size.size() + 2) {
649
+ output_size_ = IntArrayRef(*output_size).slice(2).vec();
650
+ }
651
+ if (output_size_.size() != kernel_size.size()) {
652
+ TORCH_CHECK(
653
+ false,
654
+ "output_size should be a sequence containing ",
655
+ kernel_size.size(),
656
+ " or ",
657
+ kernel_size.size() + 2,
658
+ " elements, but it has a length of '",
659
+ output_size_.size(),
660
+ "'");
661
+ }
662
+ for (const auto d : c10::irange(kernel_size.size())) {
663
+ const auto min_size = default_size[d] - stride[d];
664
+ const auto max_size = default_size[d] + stride[d];
665
+ if (!(min_size <= output_size_[d] && output_size_[d] <= max_size)) {
666
+ TORCH_CHECK(
667
+ false,
668
+ "invalid output_size ",
669
+ output_size_,
670
+ " (dim ",
671
+ d,
672
+ " must be between ",
673
+ min_size,
674
+ " and ",
675
+ max_size,
676
+ ")");
677
+ }
678
+ }
679
+ return output_size_;
680
+ }
681
+ }
682
+
683
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
684
+ namespace detail {
685
+ inline Tensor max_unpool1d(
686
+ const Tensor& input,
687
+ const Tensor& indices,
688
+ ExpandingArray<1> kernel_size,
689
+ ExpandingArray<1> stride,
690
+ ExpandingArray<1> padding,
691
+ const c10::optional<std::vector<int64_t>>& output_size) {
692
+ auto output_size_ =
693
+ _unpool_output_size(input, kernel_size, stride, padding, output_size);
694
+ output_size_.push_back(1);
695
+ return torch::max_unpool2d(
696
+ input.unsqueeze(-1), indices.unsqueeze(-1), output_size_)
697
+ .squeeze(-1);
698
+ }
699
+ } // namespace detail
700
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
701
+
702
+ /// See
703
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool1d
704
+ /// about the exact behavior of this functional.
705
+ ///
706
+ /// See the documentation for `torch::nn::functional::MaxUnpool1dFuncOptions`
707
+ /// class to learn what optional arguments are supported for this functional.
708
+ ///
709
+ /// Example:
710
+ /// ```
711
+ /// namespace F = torch::nn::functional;
712
+ /// F::max_unpool1d(x, indices,
713
+ /// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
714
+ /// ```
715
+ inline Tensor max_unpool1d(
716
+ const Tensor& input,
717
+ const Tensor& indices,
718
+ const MaxUnpool1dFuncOptions& options) {
719
+ return detail::max_unpool1d(
720
+ input,
721
+ indices,
722
+ options.kernel_size(),
723
+ options.stride(),
724
+ options.padding(),
725
+ options.output_size());
726
+ }
727
+
728
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
729
+ namespace detail {
730
+ inline Tensor max_unpool2d(
731
+ const Tensor& input,
732
+ const Tensor& indices,
733
+ ExpandingArray<2> kernel_size,
734
+ ExpandingArray<2> stride,
735
+ ExpandingArray<2> padding,
736
+ const c10::optional<std::vector<int64_t>>& output_size) {
737
+ auto output_size_ =
738
+ _unpool_output_size(input, kernel_size, stride, padding, output_size);
739
+
740
+ return torch::max_unpool2d(input, indices, output_size_);
741
+ }
742
+ } // namespace detail
743
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
744
+
745
+ /// See
746
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool2d
747
+ /// about the exact behavior of this functional.
748
+ ///
749
+ /// See the documentation for `torch::nn::functional::MaxUnpool2dFuncOptions`
750
+ /// class to learn what optional arguments are supported for this functional.
751
+ ///
752
+ /// Example:
753
+ /// ```
754
+ /// namespace F = torch::nn::functional;
755
+ /// F::max_unpool2d(x, indices,
756
+ /// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
757
+ /// ```
758
+ inline Tensor max_unpool2d(
759
+ const Tensor& input,
760
+ const Tensor& indices,
761
+ const MaxUnpool2dFuncOptions& options) {
762
+ return detail::max_unpool2d(
763
+ input,
764
+ indices,
765
+ options.kernel_size(),
766
+ options.stride(),
767
+ options.padding(),
768
+ options.output_size());
769
+ }
770
+
771
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
772
+ namespace detail {
773
+ inline Tensor max_unpool3d(
774
+ const Tensor& input,
775
+ const Tensor& indices,
776
+ ExpandingArray<3> kernel_size,
777
+ ExpandingArray<3> stride,
778
+ ExpandingArray<3> padding,
779
+ const c10::optional<std::vector<int64_t>>& output_size) {
780
+ auto output_size_ =
781
+ _unpool_output_size(input, kernel_size, stride, padding, output_size);
782
+
783
+ return torch::max_unpool3d(input, indices, output_size_, stride, padding);
784
+ }
785
+ } // namespace detail
786
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
787
+
788
+ /// See
789
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool3d
790
+ /// about the exact behavior of this functional.
791
+ ///
792
+ /// See the documentation for `torch::nn::functional::MaxUnpool3dFuncOptions`
793
+ /// class to learn what optional arguments are supported for this functional.
794
+ ///
795
+ /// Example:
796
+ /// ```
797
+ /// namespace F = torch::nn::functional;
798
+ /// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
799
+ /// ```
800
+ inline Tensor max_unpool3d(
801
+ const Tensor& input,
802
+ const Tensor& indices,
803
+ const MaxUnpool3dFuncOptions& options) {
804
+ return detail::max_unpool3d(
805
+ input,
806
+ indices,
807
+ options.kernel_size(),
808
+ options.stride(),
809
+ options.padding(),
810
+ options.output_size());
811
+ }
812
+
813
+ // ============================================================================
814
+
815
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
816
+ namespace detail {
817
+ inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
818
+ const Tensor& input,
819
+ const ExpandingArray<2>& kernel_size,
820
+ const c10::optional<ExpandingArray<2>>& output_size,
821
+ const c10::optional<ExpandingArray<2, double>>& output_ratio,
822
+ const Tensor& _random_samples) {
823
+ if (output_size == c10::nullopt && output_ratio == c10::nullopt) {
824
+ TORCH_CHECK(
825
+ false,
826
+ "fractional_max_pool2d requires specifying either ",
827
+ "an output_size or an output_ratio");
828
+ }
829
+ c10::optional<ExpandingArray<2>> output_size_ = output_size;
830
+ if (output_size_ == c10::nullopt) {
831
+ TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt);
832
+ output_size_ = {
833
+ (int64_t)(static_cast<double>(input.size(-2)) *
834
+ (*output_ratio.value())[0]),
835
+ (int64_t)(static_cast<double>(input.size(-1)) *
836
+ (*output_ratio.value())[1])};
837
+ }
838
+
839
+ Tensor _random_samples_ = _random_samples;
840
+ if (!_random_samples_.defined()) {
841
+ auto n_batch = input.dim() == 3 ? 1 : input.size(0);
842
+ _random_samples_ = torch::rand(
843
+ {n_batch, input.size(-3), 2},
844
+ torch::TensorOptions().dtype(input.dtype()).device(input.device()));
845
+ }
846
+ return torch::fractional_max_pool2d(
847
+ input, kernel_size, *output_size_, _random_samples_);
848
+ }
849
+ } // namespace detail
850
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
851
+
852
+ /// See the documentation for
853
+ /// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what
854
+ /// optional arguments are supported for this functional.
855
+ ///
856
+ /// Example:
857
+ /// ```
858
+ /// namespace F = torch::nn::functional;
859
+ /// F::fractional_max_pool2d_with_indices(x,
860
+ /// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
861
+ /// ```
862
+ inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
863
+ const Tensor& input,
864
+ const FractionalMaxPool2dFuncOptions& options) {
865
+ return detail::fractional_max_pool2d_with_indices(
866
+ input,
867
+ options.kernel_size(),
868
+ options.output_size(),
869
+ options.output_ratio(),
870
+ options._random_samples());
871
+ }
872
+
873
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
874
+ namespace detail {
875
+ inline Tensor fractional_max_pool2d(
876
+ const Tensor& input,
877
+ ExpandingArray<2> kernel_size,
878
+ c10::optional<ExpandingArray<2>> output_size,
879
+ c10::optional<ExpandingArray<2, double>> output_ratio,
880
+ const Tensor& _random_samples) {
881
+ return std::get<0>(fractional_max_pool2d_with_indices(
882
+ input, kernel_size, output_size, output_ratio, _random_samples));
883
+ }
884
+ } // namespace detail
885
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
886
+
887
+ /// See the documentation for
888
+ /// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what
889
+ /// optional arguments are supported for this functional.
890
+ ///
891
+ /// Example:
892
+ /// ```
893
+ /// namespace F = torch::nn::functional;
894
+ /// F::fractional_max_pool2d(x,
895
+ /// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
896
+ /// ```
897
+ inline Tensor fractional_max_pool2d(
898
+ const Tensor& input,
899
+ const FractionalMaxPool2dFuncOptions& options) {
900
+ return detail::fractional_max_pool2d(
901
+ input,
902
+ options.kernel_size(),
903
+ options.output_size(),
904
+ options.output_ratio(),
905
+ options._random_samples());
906
+ }
907
+
908
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
909
+ namespace detail {
910
+ inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices(
911
+ const Tensor& input,
912
+ const ExpandingArray<3>& kernel_size,
913
+ const c10::optional<ExpandingArray<3>>& output_size,
914
+ const c10::optional<ExpandingArray<3, double>>& output_ratio,
915
+ const Tensor& _random_samples) {
916
+ if (output_size == c10::nullopt && output_ratio == c10::nullopt) {
917
+ TORCH_CHECK(
918
+ false,
919
+ "fractional_max_pool3d requires specifying either ",
920
+ "an output_size or an output_ratio");
921
+ }
922
+
923
+ c10::optional<ExpandingArray<3>> output_size_ = output_size;
924
+ if (output_size_ == c10::nullopt) {
925
+ TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt);
926
+ output_size_ = {
927
+ (int64_t)(static_cast<double>(input.size(-3)) *
928
+ (*output_ratio.value())[0]),
929
+ (int64_t)(static_cast<double>(input.size(-2)) *
930
+ (*output_ratio.value())[1]),
931
+ (int64_t)(static_cast<double>(input.size(-1)) *
932
+ (*output_ratio.value())[2])};
933
+ }
934
+
935
+ Tensor _random_samples_ = _random_samples;
936
+ if (!_random_samples_.defined()) {
937
+ auto n_batch = input.dim() == 4 ? 1 : input.size(0);
938
+ _random_samples_ = torch::rand(
939
+ {n_batch, input.size(-4), 3},
940
+ torch::TensorOptions().dtype(input.dtype()).device(input.device()));
941
+ }
942
+ return torch::fractional_max_pool3d(
943
+ input, kernel_size, *output_size_, _random_samples_);
944
+ }
945
+ } // namespace detail
946
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
947
+
948
+ /// See the documentation for
949
+ /// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what
950
+ /// optional arguments are supported for this functional.
951
+ ///
952
+ /// Example:
953
+ /// ```
954
+ /// namespace F = torch::nn::functional;
955
+ /// F::fractional_max_pool3d_with_indices(x,
956
+ /// F::FractionalMaxPool3dFuncOptions(3).output_size(2));
957
+ /// ```
958
+ inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices(
959
+ const Tensor& input,
960
+ const FractionalMaxPool3dFuncOptions& options) {
961
+ return detail::fractional_max_pool3d_with_indices(
962
+ input,
963
+ options.kernel_size(),
964
+ options.output_size(),
965
+ options.output_ratio(),
966
+ options._random_samples());
967
+ }
968
+
969
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
970
+ namespace detail {
971
+ inline Tensor fractional_max_pool3d(
972
+ const Tensor& input,
973
+ ExpandingArray<3> kernel_size,
974
+ c10::optional<ExpandingArray<3>> output_size,
975
+ c10::optional<ExpandingArray<3, double>> output_ratio,
976
+ const Tensor& _random_samples) {
977
+ return std::get<0>(fractional_max_pool3d_with_indices(
978
+ input, kernel_size, output_size, output_ratio, _random_samples));
979
+ }
980
+ } // namespace detail
981
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
982
+
983
+ /// See the documentation for
984
+ /// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what
985
+ /// optional arguments are supported for this functional.
986
+ ///
987
+ /// Example:
988
+ /// ```
989
+ /// namespace F = torch::nn::functional;
990
+ /// F::fractional_max_pool3d(x,
991
+ /// F::FractionalMaxPool3dFuncOptions(3).output_size(2));
992
+ /// ```
993
+ inline Tensor fractional_max_pool3d(
994
+ const Tensor& input,
995
+ const FractionalMaxPool3dFuncOptions& options) {
996
+ return detail::fractional_max_pool3d(
997
+ input,
998
+ options.kernel_size(),
999
+ options.output_size(),
1000
+ options.output_ratio(),
1001
+ options._random_samples());
1002
+ }
1003
+
1004
+ // ============================================================================
1005
+
1006
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
1007
+ namespace detail {
1008
+ inline Tensor lp_pool1d(
1009
+ const Tensor& input,
1010
+ double norm_type,
1011
+ ExpandingArray<1> kernel_size,
1012
+ ExpandingArray<1> stride,
1013
+ bool ceil_mode) {
1014
+ Tensor out = detail::avg_pool1d(
1015
+ input.pow(norm_type),
1016
+ kernel_size,
1017
+ stride,
1018
+ /*padding=*/0,
1019
+ ceil_mode,
1020
+ /*count_include_pad=*/true);
1021
+
1022
+ return (torch::sign(out) * relu(torch::abs(out)))
1023
+ .mul((*kernel_size)[0])
1024
+ .pow(1. / norm_type);
1025
+ }
1026
+ } // namespace detail
1027
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
1028
+
1029
+ /// See
1030
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool1d
1031
+ /// about the exact behavior of this functional.
1032
+ ///
1033
+ /// See the documentation for `torch::nn::functional::LPPool1dFuncOptions` class
1034
+ /// to learn what optional arguments are supported for this functional.
1035
+ ///
1036
+ /// Example:
1037
+ /// ```
1038
+ /// namespace F = torch::nn::functional;
1039
+ /// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
1040
+ /// ```
1041
+ inline Tensor lp_pool1d(
1042
+ const Tensor& input,
1043
+ const LPPool1dFuncOptions& options) {
1044
+ return detail::lp_pool1d(
1045
+ input,
1046
+ options.norm_type(),
1047
+ options.kernel_size(),
1048
+ options.stride(),
1049
+ options.ceil_mode());
1050
+ }
1051
+
1052
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
1053
+ namespace detail {
1054
+ inline Tensor lp_pool2d(
1055
+ const Tensor& input,
1056
+ double norm_type,
1057
+ ExpandingArray<2> kernel_size,
1058
+ ExpandingArray<2> stride,
1059
+ bool ceil_mode) {
1060
+ int kw = (*kernel_size)[0];
1061
+ int kh = (*kernel_size)[1];
1062
+ Tensor out = detail::avg_pool2d(
1063
+ input.pow(norm_type),
1064
+ kernel_size,
1065
+ stride,
1066
+ /*padding=*/0,
1067
+ ceil_mode,
1068
+ /*count_include_pad=*/true,
1069
+ /*divisor_override=*/c10::nullopt);
1070
+
1071
+ return (torch::sign(out) * relu(torch::abs(out)))
1072
+ .mul(kw * kh)
1073
+ .pow(1. / norm_type);
1074
+ }
1075
+ } // namespace detail
1076
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
1077
+
1078
+ /// See
1079
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool2d
1080
+ /// about the exact behavior of this functional.
1081
+ ///
1082
+ /// See the documentation for `torch::nn::functional::LPPool2dFuncOptions` class
1083
+ /// to learn what optional arguments are supported for this functional.
1084
+ ///
1085
+ /// Example:
1086
+ /// ```
1087
+ /// namespace F = torch::nn::functional;
1088
+ /// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
1089
+ /// ```
1090
+ inline Tensor lp_pool2d(
1091
+ const Tensor& input,
1092
+ const LPPool2dFuncOptions& options) {
1093
+ return detail::lp_pool2d(
1094
+ input,
1095
+ options.norm_type(),
1096
+ options.kernel_size(),
1097
+ options.stride(),
1098
+ options.ceil_mode());
1099
+ }
1100
+
1101
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
1102
+ namespace detail {
1103
+ inline Tensor lp_pool3d(
1104
+ const Tensor& input,
1105
+ double norm_type,
1106
+ ExpandingArray<3> kernel_size,
1107
+ ExpandingArray<3> stride,
1108
+ bool ceil_mode) {
1109
+ int kd = (*kernel_size)[0];
1110
+ int kw = (*kernel_size)[1];
1111
+ int kh = (*kernel_size)[2];
1112
+ Tensor out = detail::avg_pool3d(
1113
+ input.pow(norm_type),
1114
+ kernel_size,
1115
+ stride,
1116
+ /*padding=*/0,
1117
+ ceil_mode,
1118
+ /*count_include_pad=*/true,
1119
+ /*divisor_override=*/c10::nullopt);
1120
+
1121
+ return (torch::sign(out) * relu(torch::abs(out)))
1122
+ .mul(kd * kw * kh)
1123
+ .pow(1. / norm_type);
1124
+ }
1125
+ } // namespace detail
1126
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
1127
+
1128
+ /// See
1129
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool3d
1130
+ /// about the exact behavior of this functional.
1131
+ ///
1132
+ /// See the documentation for `torch::nn::functional::LPPool3dFuncOptions` class
1133
+ /// to learn what optional arguments are supported for this functional.
1134
+ ///
1135
+ /// Example:
1136
+ /// ```
1137
+ /// namespace F = torch::nn::functional;
1138
+ /// F::lp_pool3d(x, F::LPPool3dFuncOptions(3, {3, 3, 5}).stride(3));
1139
+ /// ```
1140
+ inline Tensor lp_pool3d(
1141
+ const Tensor& input,
1142
+ const LPPool3dFuncOptions& options) {
1143
+ return detail::lp_pool3d(
1144
+ input,
1145
+ options.norm_type(),
1146
+ options.kernel_size(),
1147
+ options.stride(),
1148
+ options.ceil_mode());
1149
+ }
1150
+
1151
+ } // namespace functional
1152
+ } // namespace nn
1153
+ } // namespace torch
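To make the `_unpool_output_size` arithmetic above concrete ((in - 1) * stride + kernel_size - 2 * padding), here is a minimal pool/unpool round trip; the 8x8 input is an illustrative assumption:

```
#include <torch/torch.h>

namespace F = torch::nn::functional;

int main() {
  auto x = torch::randn({1, 1, 8, 8});
  auto [pooled, indices] = F::max_pool2d_with_indices(
      x, F::MaxPool2dFuncOptions(2).stride(2));  // pooled is 1x1x4x4
  // Default unpool output size: (4 - 1) * 2 + 2 - 0 = 8 per spatial dim.
  auto unpooled = F::max_unpool2d(
      pooled, indices, F::MaxUnpool2dFuncOptions(2).stride(2));
  TORCH_CHECK(unpooled.sizes() == x.sizes());
  return 0;
}
```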
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/upsampling.h ADDED
@@ -0,0 +1,289 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/nn/functional/pooling.h>
5
+ #include <torch/nn/options/upsampling.h>
6
+
7
+ #include <cmath>
8
+ #include <utility>
9
+
10
+ namespace torch {
11
+ namespace nn {
12
+ namespace functional {
13
+
14
+ inline std::vector<int64_t> _interp_output_size(
15
+ int64_t dim,
16
+ std::tuple<
17
+ Tensor,
18
+ c10::optional<std::vector<int64_t>>,
19
+ c10::optional<std::vector<double>>,
20
+ c10::optional<bool>> closed_over_args) {
21
+ auto [input, size, scale_factor, recompute_scale_factor] = closed_over_args;
22
+ if (size == c10::nullopt && scale_factor == c10::nullopt) {
23
+ TORCH_CHECK(false, "either size or scale_factor should be defined");
24
+ }
25
+ if (size != c10::nullopt && scale_factor != c10::nullopt) {
26
+ TORCH_CHECK(false, "only one of size or scale_factor should be defined");
27
+ }
28
+ if (scale_factor != c10::nullopt) {
29
+ if (static_cast<int64_t>(scale_factor.value().size()) != dim) {
30
+ TORCH_CHECK(
31
+ false,
32
+ "scale_factor shape must match input shape. ",
33
+ "Input is ",
34
+ dim,
35
+ "D, scale_factor size is ",
36
+ torch::ArrayRef<double>(*scale_factor));
37
+ }
38
+ }
39
+ if (size != c10::nullopt) {
40
+ return *size;
41
+ }
42
+
43
+ TORCH_INTERNAL_ASSERT(scale_factor != c10::nullopt);
44
+ auto scale_factors = *scale_factor;
45
+
46
+ if (recompute_scale_factor == c10::nullopt) {
47
+ // only warn when the scales have floating values since
48
+ // the result for ints is the same with/without recompute_scale_factor
49
+ bool is_float_scale_factor = false;
50
+ for (double scale : scale_factors) {
51
+ is_float_scale_factor = floor(scale) != scale;
52
+ if (is_float_scale_factor) {
53
+ break;
54
+ }
55
+ }
56
+ if (is_float_scale_factor) {
57
+ TORCH_WARN(
58
+ "The default behavior for interpolate/upsample with float scale_factor changed "
59
+ "in 1.6.0 to align with other frameworks/libraries, and uses scale_factor directly, "
60
+ "instead of relying on the computed output size. "
61
+ "If you wish to keep the old behavior, please set recompute_scale_factor=True. "
62
+ "See the documentation of nn.Upsample for details. ");
63
+ }
64
+ }
65
+
66
+ std::vector<int64_t> ret;
67
+ for (const auto i : c10::irange(dim)) {
68
+ ret.emplace_back(static_cast<int64_t>(
69
+ floor(static_cast<double>(input.size(i + 2)) * scale_factors[i])));
70
+ }
71
+ return ret;
72
+ }
73
+
74
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
75
+ namespace detail {
76
+ inline Tensor interpolate(
77
+ const Tensor& input,
78
+ const c10::optional<std::vector<int64_t>>& size,
79
+ const c10::optional<std::vector<double>>& scale_factor,
80
+ InterpolateFuncOptions::mode_t mode,
81
+ c10::optional<bool> align_corners,
82
+ c10::optional<bool> recompute_scale_factor,
83
+ bool antialias) {
84
+ if (std::holds_alternative<enumtype::kNearest>(mode) ||
85
+ std::get_if<enumtype::kArea>(&mode)) {
86
+ if (align_corners != c10::nullopt) {
87
+ TORCH_CHECK(
88
+ false,
89
+ "align_corners option can only be set with the "
90
+ "interpolating modes: linear | bilinear | bicubic | trilinear");
91
+ }
92
+ } else {
93
+ if (align_corners == c10::nullopt) {
94
+ TORCH_WARN(
95
+ "Default upsampling behavior when mode=",
96
+ enumtype::get_enum_name(mode),
97
+ " is changed "
98
+ "to align_corners=False since 0.4.0. Please specify "
99
+ "align_corners=True if the old behavior is desired. "
100
+ "See the documentation of nn.Upsample for details.");
101
+ align_corners = false;
102
+ }
103
+ }
104
+
105
+ TORCH_CHECK(
106
+ input.dim() >= 3 && input.dim() <= 5,
107
+ "Input Error: Only 3D, 4D and 5D input Tensors supported "
108
+ "(got ",
109
+ input.dim(),
110
+ "D) for the modes: nearest | linear | bilinear | bicubic | trilinear "
111
+ "(got ",
112
+ enumtype::get_enum_name(mode),
113
+ ")");
114
+
115
+ auto scale_factor_len = input.dim() - 2;
116
+ std::vector<c10::optional<double>> scale_factor_list(
117
+ scale_factor_len, c10::nullopt);
118
+ if (scale_factor != c10::nullopt && !recompute_scale_factor.value_or(false)) {
119
+ auto _scale_factor_repeated = *scale_factor;
120
+ scale_factor_list = {};
121
+ for (const auto& elem : _scale_factor_repeated) {
122
+ scale_factor_list.emplace_back(elem);
123
+ }
124
+ }
125
+
126
+ if (antialias &&
127
+ !(input.dim() == 4 &&
128
+ (std::get_if<enumtype::kBilinear>(&mode) ||
129
+ std::get_if<enumtype::kBicubic>(&mode)))) {
130
+ TORCH_CHECK(
131
+ false,
132
+ "Anti-alias option is only supported for bilinear and bicubic modes");
133
+ }
134
+
135
+ auto closed_over_args =
136
+ std::make_tuple(input, size, scale_factor, recompute_scale_factor);
137
+ if (input.dim() == 3 && std::get_if<enumtype::kNearest>(&mode)) {
138
+ return torch::upsample_nearest1d(
139
+ input,
140
+ _interp_output_size(1, std::move(closed_over_args)),
141
+ scale_factor_list.at(0));
142
+ } else if (input.dim() == 4 && std::get_if<enumtype::kNearest>(&mode)) {
143
+ return torch::upsample_nearest2d(
144
+ input,
145
+ _interp_output_size(2, std::move(closed_over_args)),
146
+ scale_factor_list.at(0),
147
+ scale_factor_list.at(1));
148
+ } else if (input.dim() == 5 && std::get_if<enumtype::kNearest>(&mode)) {
149
+ return torch::upsample_nearest3d(
150
+ input,
151
+ _interp_output_size(3, std::move(closed_over_args)),
152
+ scale_factor_list.at(0),
153
+ scale_factor_list.at(1),
154
+ scale_factor_list.at(2));
155
+ } else if (input.dim() == 3 && std::get_if<enumtype::kNearestExact>(&mode)) {
156
+ return torch::_upsample_nearest_exact1d(
157
+ input,
158
+ _interp_output_size(1, std::move(closed_over_args)),
159
+ scale_factor_list.at(0));
160
+ } else if (input.dim() == 4 && std::get_if<enumtype::kNearestExact>(&mode)) {
161
+ return torch::_upsample_nearest_exact2d(
162
+ input,
163
+ _interp_output_size(2, std::move(closed_over_args)),
164
+ scale_factor_list.at(0),
165
+ scale_factor_list.at(1));
166
+ } else if (input.dim() == 5 && std::get_if<enumtype::kNearestExact>(&mode)) {
167
+ return torch::_upsample_nearest_exact3d(
168
+ input,
169
+ _interp_output_size(3, std::move(closed_over_args)),
170
+ scale_factor_list.at(0),
171
+ scale_factor_list.at(1),
172
+ scale_factor_list.at(2));
173
+ } else if (input.dim() == 3 && std::get_if<enumtype::kArea>(&mode)) {
174
+ return detail::adaptive_avg_pool1d(
175
+ input, _interp_output_size(1, std::move(closed_over_args)));
176
+ } else if (input.dim() == 4 && std::get_if<enumtype::kArea>(&mode)) {
177
+ return detail::adaptive_avg_pool2d(
178
+ input, _interp_output_size(2, std::move(closed_over_args)));
179
+ } else if (input.dim() == 5 && std::get_if<enumtype::kArea>(&mode)) {
180
+ return detail::adaptive_avg_pool3d(
181
+ input, _interp_output_size(3, std::move(closed_over_args)));
182
+ } else if (input.dim() == 3 && std::get_if<enumtype::kLinear>(&mode)) {
183
+ TORCH_CHECK(
184
+ align_corners != c10::nullopt, "align_corners should be specified.");
185
+ return torch::upsample_linear1d(
186
+ input,
187
+ _interp_output_size(1, std::move(closed_over_args)),
188
+ *align_corners,
189
+ scale_factor_list.at(0));
190
+ } else if (input.dim() == 3 && std::get_if<enumtype::kBilinear>(&mode)) {
191
+ TORCH_CHECK(false, "Got 3D input, but bilinear mode needs 4D input");
192
+ } else if (input.dim() == 3 && std::get_if<enumtype::kTrilinear>(&mode)) {
193
+ TORCH_CHECK(false, "Got 3D input, but trilinear mode needs 5D input");
194
+ } else if (input.dim() == 4 && std::get_if<enumtype::kLinear>(&mode)) {
195
+ TORCH_CHECK(false, "Got 4D input, but linear mode needs 3D input");
196
+ } else if (input.dim() == 4 && std::get_if<enumtype::kBilinear>(&mode)) {
197
+ TORCH_CHECK(
198
+ align_corners != c10::nullopt, "align_corners should be specified.");
199
+ if (antialias) {
200
+ return torch::_upsample_bilinear2d_aa(
201
+ input,
202
+ _interp_output_size(2, std::move(closed_over_args)),
203
+ *align_corners,
204
+ scale_factor_list.at(0),
205
+ scale_factor_list.at(1));
206
+ }
207
+ return torch::upsample_bilinear2d(
208
+ input,
209
+ _interp_output_size(2, std::move(closed_over_args)),
210
+ *align_corners,
211
+ scale_factor_list.at(0),
212
+ scale_factor_list.at(1));
213
+ } else if (input.dim() == 4 && std::get_if<enumtype::kTrilinear>(&mode)) {
214
+ TORCH_CHECK(false, "Got 4D input, but trilinear mode needs 5D input");
215
+ } else if (input.dim() == 5 && std::get_if<enumtype::kLinear>(&mode)) {
216
+ TORCH_CHECK(false, "Got 5D input, but linear mode needs 3D input");
217
+ } else if (input.dim() == 5 && std::get_if<enumtype::kBilinear>(&mode)) {
218
+ TORCH_CHECK(false, "Got 5D input, but bilinear mode needs 4D input");
219
+ } else if (input.dim() == 5 && std::get_if<enumtype::kTrilinear>(&mode)) {
220
+ TORCH_CHECK(
221
+ align_corners != c10::nullopt, "align_corners should be specified.");
222
+ return torch::upsample_trilinear3d(
223
+ input,
224
+ _interp_output_size(3, std::move(closed_over_args)),
225
+ *align_corners,
226
+ scale_factor_list.at(0),
227
+ scale_factor_list.at(1),
228
+ scale_factor_list.at(2));
229
+ } else if (input.dim() == 4 && std::get_if<enumtype::kBicubic>(&mode)) {
230
+ TORCH_CHECK(
231
+ align_corners != c10::nullopt, "align_corners should be specified.");
232
+ if (antialias) {
233
+ return torch::_upsample_bicubic2d_aa(
234
+ input,
235
+ _interp_output_size(2, std::move(closed_over_args)),
236
+ *align_corners,
237
+ scale_factor_list.at(0),
238
+ scale_factor_list.at(1));
239
+ }
240
+ return torch::upsample_bicubic2d(
241
+ input,
242
+ _interp_output_size(2, std::move(closed_over_args)),
243
+ *align_corners,
244
+ scale_factor_list.at(0),
245
+ scale_factor_list.at(1));
246
+ } else {
247
+ TORCH_CHECK(
248
+ false,
249
+ "Input Error: Only 3D, 4D and 5D input Tensors supported "
250
+ "(got ",
251
+ input.dim(),
252
+ "D) for the modes: nearest | linear | bilinear | bicubic | trilinear "
253
+ "(got ",
254
+ enumtype::get_enum_name(mode),
255
+ ")");
256
+ }
257
+ }
258
+ } // namespace detail
259
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
260
+
261
+ /// See
262
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.interpolate
263
+ /// about the exact behavior of this functional.
264
+ ///
265
+ /// See the documentation for `torch::nn::functional::InterpolateFuncOptions`
266
+ /// class to learn what optional arguments are supported for this functional.
267
+ ///
268
+ /// Example:
269
+ /// ```
270
+ /// namespace F = torch::nn::functional;
271
+ /// F::interpolate(input,
272
+ /// F::InterpolateFuncOptions().size({4}).mode(torch::kNearest));
273
+ /// ```
274
+ inline Tensor interpolate(
275
+ const Tensor& input,
276
+ const InterpolateFuncOptions& options = {}) {
277
+ return detail::interpolate(
278
+ input,
279
+ options.size(),
280
+ options.scale_factor(),
281
+ options.mode(),
282
+ options.align_corners(),
283
+ options.recompute_scale_factor(),
284
+ options.antialias());
285
+ }
286
+
287
+ } // namespace functional
288
+ } // namespace nn
289
+ } // namespace torch
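A minimal usage sketch of the `interpolate` functional declared above; the tensor shape, output size and scale factor below are illustrative only and are not taken from this header:

#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto input = torch::rand({1, 3, 8, 8});  // NCHW; example shape only

  // Resize by explicit output size ...
  auto by_size = F::interpolate(
      input,
      F::InterpolateFuncOptions()
          .size(std::vector<int64_t>{16, 16})
          .mode(torch::kBilinear)
          .align_corners(false));

  // ... or by scale factor (passing both would trip the TORCH_CHECK above).
  auto by_scale = F::interpolate(
      input,
      F::InterpolateFuncOptions()
          .scale_factor(std::vector<double>{2.0, 2.0})
          .mode(torch::kBilinear)
          .align_corners(false));

  std::cout << by_size.sizes() << " " << by_scale.sizes() << std::endl;
}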
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/options/vision.h>
4
+ #include <torch/types.h>
5
+
6
+ namespace torch {
7
+ namespace nn {
8
+ namespace functional {
9
+
10
+ inline Tensor affine_grid(
11
+ const Tensor& theta,
12
+ const IntArrayRef& size,
13
+ bool align_corners = false) {
14
+ // enforce floating point dtype on theta
15
+ TORCH_CHECK(
16
+ theta.is_floating_point(),
17
+ "Expected theta to have floating point type, but got ",
18
+ theta.dtype());
19
+
20
+ // check that shapes and sizes match
21
+ if (size.size() == 4) {
22
+ TORCH_CHECK(
23
+ theta.dim() == 3 && theta.size(-2) == 2 && theta.size(-1) == 3,
24
+ "Expected a batch of 2D affine matrices of shape Nx2x3 for size ",
25
+ size,
26
+ ". Got ",
27
+ theta.sizes(),
28
+ ".");
29
+ } else if (size.size() == 5) {
30
+ TORCH_CHECK(
31
+ theta.dim() == 3 && theta.size(-2) == 3 && theta.size(-1) == 4,
32
+ "Expected a batch of 3D affine matrices of shape Nx3x4 for size ",
33
+ size,
34
+ ". Got ",
35
+ theta.sizes(),
36
+ ".");
37
+ } else {
38
+ TORCH_CHECK(
39
+ false,
40
+ "affine_grid only supports 4D and 5D sizes, ",
41
+ "for 2D and 3D affine transforms, respectively. ",
42
+ "Got size ",
43
+ size);
44
+ }
45
+
46
+ if (*std::min_element(size.begin(), size.end()) <= 0) {
47
+ TORCH_CHECK(false, "Expected non-zero, positive output size. Got ", size);
48
+ }
49
+
50
+ return torch::affine_grid_generator(theta, size, align_corners);
51
+ }
52
+
53
+ // ============================================================================
54
+
55
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
56
+ namespace detail {
57
+ inline Tensor grid_sample(
58
+ const Tensor& input,
59
+ const Tensor& grid,
60
+ GridSampleFuncOptions::mode_t mode,
61
+ GridSampleFuncOptions::padding_mode_t padding_mode,
62
+ c10::optional<bool> align_corners) {
63
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
64
+ int64_t mode_enum, padding_mode_enum;
65
+
66
+ if (std::holds_alternative<enumtype::kBilinear>(mode)) {
67
+ mode_enum = 0;
68
+ } else if (std::holds_alternative<enumtype::kNearest>(mode)) {
69
+ mode_enum = 1;
70
+ } else { /// mode == 'bicubic'
71
+ mode_enum = 2;
72
+ }
73
+
74
+ if (std::holds_alternative<enumtype::kZeros>(padding_mode)) {
75
+ padding_mode_enum = 0;
76
+ } else if (std::holds_alternative<enumtype::kBorder>(padding_mode)) {
77
+ padding_mode_enum = 1;
78
+ } else { /// padding_mode == 'reflection'
79
+ padding_mode_enum = 2;
80
+ }
81
+
82
+ if (!align_corners.has_value()) {
83
+ TORCH_WARN(
84
+ "Default grid_sample and affine_grid behavior has changed ",
85
+ "to align_corners=False since 1.3.0. Please specify ",
86
+ "align_corners=True if the old behavior is desired. ",
87
+ "See the documentation of grid_sample for details.");
88
+ align_corners = false;
89
+ }
90
+
91
+ return torch::grid_sampler(
92
+ input, grid, mode_enum, padding_mode_enum, align_corners.value());
93
+ }
94
+ } // namespace detail
95
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
96
+
97
+ /// See
98
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.grid_sample
99
+ /// about the exact behavior of this functional.
100
+ ///
101
+ /// See the documentation for `torch::nn::functional::GridSampleFuncOptions`
102
+ /// class to learn what optional arguments are supported for this functional.
103
+ ///
104
+ /// Example:
105
+ /// ```
106
+ /// namespace F = torch::nn::functional;
107
+ /// F::grid_sample(input, grid,
108
+ /// F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true));
109
+ /// ```
110
+ inline Tensor grid_sample(
111
+ const Tensor& input,
112
+ const Tensor& grid,
113
+ const GridSampleFuncOptions& options = {}) {
114
+ return detail::grid_sample(
115
+ input,
116
+ grid,
117
+ options.mode(),
118
+ options.padding_mode(),
119
+ options.align_corners());
120
+ }
121
+
122
+ } // namespace functional
123
+ } // namespace nn
124
+ } // namespace torch
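A short sketch combining the two functionals above, `affine_grid` and `grid_sample`; the identity transform and the shapes are illustrative only:

#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto input = torch::rand({1, 3, 8, 8});            // N x C x H x W

  // Identity 2D affine matrix of shape N x 2 x 3, as required above.
  auto theta = torch::eye(3).slice(0, 0, 2).unsqueeze(0);

  auto grid = F::affine_grid(theta, {1, 3, 8, 8}, /*align_corners=*/false);
  auto output = F::grid_sample(
      input,
      grid,
      F::GridSampleFuncOptions()
          .mode(torch::kBilinear)
          .padding_mode(torch::kZeros)
          .align_corners(false));

  std::cout << output.sizes() << std::endl;           // [1, 3, 8, 8]
}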
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/enum.h>
5
+ #include <torch/types.h>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+ namespace init {
10
+
11
+ using NonlinearityType = std::variant<
12
+ enumtype::kLinear,
13
+ enumtype::kConv1D,
14
+ enumtype::kConv2D,
15
+ enumtype::kConv3D,
16
+ enumtype::kConvTranspose1D,
17
+ enumtype::kConvTranspose2D,
18
+ enumtype::kConvTranspose3D,
19
+ enumtype::kSigmoid,
20
+ enumtype::kTanh,
21
+ enumtype::kReLU,
22
+ enumtype::kLeakyReLU>;
23
+
24
+ using FanModeType = std::variant<enumtype::kFanIn, enumtype::kFanOut>;
25
+
26
+ } // namespace init
27
+ } // namespace nn
28
+
29
+ namespace nn {
30
+ namespace init {
31
+
32
+ /// Return the recommended gain value for the given nonlinearity function.
33
+ TORCH_API double calculate_gain(
34
+ NonlinearityType nonlinearity,
35
+ double param = 0.01);
36
+
37
+ /// Fills the given `tensor` with the provided `value` in-place, and returns it.
38
+ /// No gradient will be recorded for this operation.
39
+ TORCH_API Tensor constant_(Tensor tensor, Scalar value);
40
+
41
+ /// Fills the given `tensor` with the Dirac delta function in-place, and returns
42
+ /// it. No gradient will be recorded for this operation.
43
+ TORCH_API Tensor dirac_(Tensor tensor);
44
+
45
+ /// Fills the given 2-dimensional `matrix` with an identity matrix.
46
+ /// No gradient will be recorded for this operation.
47
+ TORCH_API Tensor eye_(Tensor matrix);
48
+
49
+ /// Fills the given `tensor` with values drawn from a normal
50
+ /// distribution parameterized by `mean` and `std`.
51
+ /// No gradient will be recorded for this operation.
52
+ TORCH_API Tensor normal_(Tensor tensor, double mean = 0, double std = 1);
53
+
54
+ /// Fills the given `tensor` with ones.
55
+ /// No gradient will be recorded for this operation.
56
+ TORCH_API Tensor ones_(Tensor tensor);
57
+
58
+ /// Fills the input `Tensor` with a (semi) orthogonal matrix, as described in
59
+ /// "Exact solutions to the nonlinear dynamics of learning in deep linear neural
60
+ /// networks" - Saxe, A. et al. (2013). The input tensor must have at least 2
61
+ /// dimensions, and for tensors with more than 2 dimensions the trailing
62
+ /// dimensions are flattened.
63
+ /// No gradient will be recorded for this operation.
64
+ TORCH_API Tensor orthogonal_(Tensor tensor, double gain = 1.0);
65
+
66
+ /// Fills the 2D input `Tensor` as a sparse matrix, where the
67
+ /// non-zero elements will be drawn from a centered normal distribution
68
+ /// with the given standard deviation `std`, as described in "Deep learning via
69
+ /// Hessian-free optimization" - Martens, J. (2010). The `sparsity` is a real
70
+ /// value between 0 and 1 that controls the fraction of elements in each column
71
+ /// to be set to zero.
72
+ /// No gradient will be recorded for this operation.
73
+ TORCH_API Tensor sparse_(Tensor tensor, double sparsity, double std = 0.01);
74
+
75
+ /// Fills the given `tensor` with values drawn from a uniform
76
+ /// distribution parameterized by `low` and `high`.
77
+ /// No gradient will be recorded for this operation.
78
+ TORCH_API Tensor uniform_(Tensor tensor, double low = 0, double high = 1);
79
+
80
+ /// Fills the input `Tensor` with values according to the method
81
+ /// described in "Delving deep into rectifiers: Surpassing human-level
82
+ /// performance on ImageNet classification" - He, K. et al. (2015), using a
83
+ /// normal distribution. Also known as He initialization.
84
+ /// No gradient will be recorded for this operation.
85
+ TORCH_API Tensor kaiming_normal_(
86
+ Tensor tensor,
87
+ double a = 0,
88
+ FanModeType mode = torch::kFanIn,
89
+ NonlinearityType nonlinearity = torch::kLeakyReLU);
90
+
91
+ /// Fills the input `Tensor` with values according to the method
92
+ /// described in "Delving deep into rectifiers: Surpassing human-level
93
+ /// performance on ImageNet classification" - He, K. et al. (2015), using a
94
+ /// uniform distribution. Also known as He initialization.
95
+ /// No gradient will be recorded for this operation.
96
+ TORCH_API Tensor kaiming_uniform_(
97
+ Tensor tensor,
98
+ double a = 0,
99
+ FanModeType mode = torch::kFanIn,
100
+ NonlinearityType nonlinearity = torch::kLeakyReLU);
101
+
102
+ /// Fills the input `Tensor` with values according to the method
103
+ /// described in "Understanding the difficulty of training deep feedforward
104
+ /// neural networks" - Glorot, X. & Bengio, Y. (2010). Values are scaled by the
105
+ /// `gain` parameter. No gradient will be recorded for this operation.
106
+ TORCH_API Tensor xavier_normal_(Tensor tensor, double gain = 1.0);
107
+
108
+ /// Fills the input `Tensor` with values according to the method
109
+ /// described in "Understanding the difficulty of training deep feedforward
110
+ /// neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform
111
+ /// distribution. Values are scaled by the `gain` parameter.
112
+ /// No gradient will be recorded for this operation.
113
+ TORCH_API Tensor xavier_uniform_(Tensor tensor, double gain = 1.0);
114
+
115
+ /// Fills the given `tensor` with zeros.
116
+ /// No gradient will be recorded for this operation.
117
+ TORCH_API Tensor zeros_(Tensor tensor);
118
+
119
+ TORCH_API std::tuple<int64_t, int64_t> _calculate_fan_in_and_fan_out(
120
+ const Tensor& tensor);
121
+
122
+ } // namespace init
123
+ } // namespace nn
124
+ } // namespace torch
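A brief sketch of how these in-place initializers are typically applied to a module's tensors; the layer sizes and the choice of `kFanOut`/`kReLU` are illustrative:

#include <torch/torch.h>

int main() {
  torch::nn::Linear linear(128, 64);

  // The initializers operate in place and record no gradient.
  torch::nn::init::kaiming_normal_(
      linear->weight, /*a=*/0.0, torch::kFanOut, torch::kReLU);
  torch::nn::init::constant_(linear->bias, 0.0);

  // calculate_gain() supplies the recommended scaling for a nonlinearity,
  // which can be forwarded to the Xavier/Glorot initializers.
  double gain = torch::nn::init::calculate_gain(torch::kReLU);
  torch::nn::init::xavier_uniform_(linear->weight, gain);
}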
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h ADDED
@@ -0,0 +1,702 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/modules/container/any_module_holder.h>
4
+ #include <torch/nn/modules/container/any_value.h>
5
+ #include <torch/nn/pimpl.h>
6
+ #include <torch/ordered_dict.h>
7
+ #include <torch/serialize/archive.h>
8
+ #include <torch/types.h>
9
+
10
+ #include <ATen/ATen.h>
11
+
12
+ #include <functional>
13
+ #include <iosfwd>
14
+ #include <map>
15
+ #include <memory>
16
+ #include <string>
17
+ #include <type_traits>
18
+
19
+ namespace torch {
20
+ namespace nn {
21
+
22
+ /// The base class for all modules in PyTorch.
23
+ ///
24
+ /// \rst
25
+ /// .. note::
26
+ /// The design and implementation of this class is largely based on the Python
27
+ /// API. You may want to consult the python documentation for
28
+ /// :py:class:`pytorch:torch.nn.Module` for further clarification on certain
29
+ /// methods or behavior.
30
+ /// \endrst
31
+ ///
32
+ /// A `Module` is an abstraction over the implementation of some function or
33
+ /// algorithm, possibly associated with some persistent data. A `Module` may
34
+ /// contain further `Module`s ("submodules"), each with their own
35
+ /// implementation, persistent data and further submodules. `Module`s can thus
36
+ /// be said to form a recursive tree structure. A `Module` is registered as a
37
+ /// submodule to another `Module` by calling `register_module()`, typically from
38
+ /// within a parent module's constructor.
39
+ ///
40
+ /// A distinction is made between three kinds of persistent data that may be
41
+ /// associated with a `Module`:
42
+ ///
43
+ /// 1. *Parameters*: tensors that record gradients, typically weights updated
44
+ /// during the backward step (e.g. the `weight` of a `Linear` module),
45
+ /// 2. *Buffers*: tensors that do not record gradients, typically updated during
46
+ /// the forward step, such as running statistics (e.g. `mean` and `variance`
47
+ /// in the `BatchNorm` module),
48
+ /// 3. Any additional state, not necessarily tensors, required for the
49
+ /// implementation or configuration of a `Module`.
50
+ ///
51
+ /// The first two kinds of state are special in that they may be registered
52
+ /// with the `Module` system to allow convenient access and batch configuration.
53
+ /// For example, registered parameters in any `Module` may be iterated over via
54
+ /// the `parameters()` accessor. Further, changing the data type of a `Module`'s
55
+ /// registered parameters can be done conveniently via `Module::to()`, e.g.
56
+ /// `module->to(torch::kCUDA)` to move all parameters to GPU memory. Lastly,
57
+ /// registered parameters and buffers are handled specially during a `clone()`
58
+ /// operation, which performs a deepcopy of a cloneable `Module` hierarchy.
59
+ ///
60
+ /// Parameters are registered with a `Module` via `register_parameter`. Buffers
61
+ /// are registered separately via `register_buffer`. These methods are part of
62
+ /// the public API of `Module` and are typically invoked from within a
63
+ /// concrete `Module`s constructor.
64
+ class TORCH_API Module : public std::enable_shared_from_this<Module> {
65
+ public:
66
+ using ModuleApplyFunction = std::function<void(Module&)>;
67
+ using ConstModuleApplyFunction = std::function<void(const Module&)>;
68
+ using NamedModuleApplyFunction =
69
+ std::function<void(const std::string&, Module&)>;
70
+ using ConstNamedModuleApplyFunction =
71
+ std::function<void(const std::string&, const Module&)>;
72
+ using ModulePointerApplyFunction =
73
+ std::function<void(const std::shared_ptr<Module>&)>;
74
+ using NamedModulePointerApplyFunction =
75
+ std::function<void(const std::string&, const std::shared_ptr<Module>&)>;
76
+
77
+ /// Tells the base `Module` about the name of the submodule.
78
+ explicit Module(std::string name);
79
+
80
+ /// Constructs the module without immediate knowledge of the submodule's name.
81
+ /// The name of the submodule is inferred via RTTI (if possible) the first
82
+ /// time `.name()` is invoked.
83
+ Module();
84
+ Module(const Module&) = default;
85
+ Module& operator=(const Module&) = default;
86
+ Module(Module&&) noexcept = default;
87
+ Module& operator=(Module&&) noexcept = default;
88
+
89
+ virtual ~Module() = default;
90
+
91
+ /// Returns the name of the `Module`.
92
+ ///
93
+ /// A `Module` has an associated `name`, which is a string representation of
94
+ /// the kind of concrete `Module` it represents, such as `"Linear"` for the
95
+ /// `Linear` module. Under most circumstances, this name is automatically
96
+ /// inferred via runtime type information (RTTI). In the unusual circumstance
97
+ /// that you have this feature disabled, you may want to manually name your
98
+ /// `Module`s by passing the string name to the `Module` base class'
99
+ /// constructor.
100
+ const std::string& name() const noexcept;
101
+
102
+ /// Performs a recursive deep copy of the module and all its registered
103
+ /// parameters, buffers and submodules.
104
+ ///
105
+ /// Optionally, this method sets the current device
106
+ /// to the one supplied before cloning. If no device is given, each
107
+ /// parameter and buffer will be moved to the device of its source.
108
+ ///
109
+ /// \rst
110
+ /// .. attention::
111
+ /// Attempting to call the `clone()` method inherited from the base `Module`
112
+ /// class (the one documented here) will fail. To inherit an actual
113
+ /// implementation of `clone()`, you must subclass `Cloneable`. `Cloneable`
114
+ /// is templatized on the concrete module type, and can thus properly copy a
115
+ /// `Module`. This method is provided on the base class' API solely for an
116
+ /// easier-to-use polymorphic interface.
117
+ /// \endrst
118
+ virtual std::shared_ptr<Module> clone(
119
+ const optional<Device>& device = nullopt) const;
120
+
121
+ /// Applies the `function` to the `Module` and recursively to every submodule.
122
+ /// The function must accept a `Module&`.
123
+ ///
124
+ /// \rst
125
+ /// .. code-block:: cpp
126
+ /// MyModule module;
127
+ /// module->apply([](nn::Module& module) {
128
+ /// std::cout << module.name() << std::endl;
129
+ /// });
130
+ /// \endrst
131
+ void apply(const ModuleApplyFunction& function);
132
+
133
+ /// Applies the `function` to the `Module` and recursively to every submodule.
134
+ /// The function must accept a `const Module&`.
135
+ ///
136
+ /// \rst
137
+ /// .. code-block:: cpp
138
+ /// MyModule module;
139
+ /// module->apply([](const nn::Module& module) {
140
+ /// std::cout << module.name() << std::endl;
141
+ /// });
142
+ /// \endrst
143
+ void apply(const ConstModuleApplyFunction& function) const;
144
+
145
+ /// Applies the `function` to the `Module` and recursively to every submodule.
146
+ /// The function must accept a `const std::string&` for the key of the module,
147
+ /// and a `Module&`. The key of the module itself is the empty string. If
148
+ /// `name_prefix` is given, it is prepended to every key as
149
+ /// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
150
+ ///
151
+ /// \rst
152
+ /// .. code-block:: cpp
153
+ /// MyModule module;
154
+ /// module->apply([](const std::string& key, nn::Module& module) {
155
+ /// std::cout << key << ": " << module.name() << std::endl;
156
+ /// });
157
+ /// \endrst
158
+ void apply(
159
+ const NamedModuleApplyFunction& function,
160
+ const std::string& name_prefix = std::string());
161
+
162
+ /// Applies the `function` to the `Module` and recursively to every submodule.
163
+ /// The function must accept a `const std::string&` for the key of the module,
164
+ /// and a `const Module&`. The key of the module itself is the empty string.
165
+ /// If `name_prefix` is given, it is prepended to every key as
166
+ /// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
167
+ ///
168
+ /// \rst
169
+ /// .. code-block:: cpp
170
+ /// MyModule module;
171
+ /// module->apply([](const std::string& key, const nn::Module& module) {
172
+ /// std::cout << key << ": " << module.name() << std::endl;
173
+ /// });
174
+ /// \endrst
175
+ void apply(
176
+ const ConstNamedModuleApplyFunction& function,
177
+ const std::string& name_prefix = std::string()) const;
178
+
179
+ /// Applies the `function` to the `Module` and recursively to every submodule.
180
+ /// The function must accept a `const std::shared_ptr<Module>&`.
181
+ ///
182
+ /// \rst
183
+ /// .. code-block:: cpp
184
+ /// MyModule module;
185
+ /// module->apply([](const std::shared_ptr<nn::Module>& module) {
186
+ /// std::cout << module->name() << std::endl;
187
+ /// });
188
+ /// \endrst
189
+ void apply(const ModulePointerApplyFunction& function) const;
190
+
191
+ /// Applies the `function` to the `Module` and recursively to every submodule.
192
+ /// The function must accept a `const std::string&` for the key of the module,
193
+ /// and a `const std::shared_ptr<Module>&`. The key of the module itself is
194
+ /// the empty string. If `name_prefix` is given, it is prepended to every key
195
+ /// as
196
+ /// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
197
+ ///
198
+ /// \rst
199
+ /// .. code-block:: cpp
200
+ /// MyModule module;
201
+ /// module->apply([](const std::string& key,
202
+ /// const std::shared_ptr<nn::Module>& module) {
203
+ /// std::cout << key << ": " << module->name() << std::endl;
204
+ /// });
205
+ /// \endrst
206
+ void apply(
207
+ const NamedModulePointerApplyFunction& function,
208
+ const std::string& name_prefix = std::string()) const;
209
+
210
+ /// Returns the parameters of this `Module` and if `recurse` is true, also
211
+ /// recursively of every submodule.
212
+ std::vector<Tensor> parameters(bool recurse = true) const;
213
+
214
+ /// Returns an `OrderedDict` with the parameters of this `Module` along with
215
+ /// their keys, and if `recurse` is true also recursively of every submodule.
216
+ OrderedDict<std::string, Tensor> named_parameters(bool recurse = true) const;
217
+
218
+ /// Returns the buffers of this `Module` and if `recurse` is true, also
219
+ /// recursively of every submodule.
220
+ std::vector<Tensor> buffers(bool recurse = true) const;
221
+
222
+ /// Returns an `OrderedDict` with the buffers of this `Module` along with
223
+ /// their keys, and if `recurse` is true also recursively of every submodule.
224
+ OrderedDict<std::string, Tensor> named_buffers(bool recurse = true) const;
225
+
226
+ /// Returns the submodules of this `Module` (the entire submodule hierarchy)
227
+ /// and if `include_self` is true, also inserts a `shared_ptr` to this module
228
+ /// in the first position.
229
+ ///
230
+ /// \rst
231
+ /// .. warning::
232
+ /// Only pass `include_self` as `true` if this `Module` is stored in a
233
+ /// `shared_ptr`! Otherwise an exception will be thrown. You may still call
234
+ /// this method with `include_self` set to false if your `Module` is not
235
+ /// stored in a `shared_ptr`.
236
+ /// \endrst
237
+ std::vector<std::shared_ptr<Module>> modules(bool include_self = true) const;
238
+
239
+ /// Returns an `OrderedDict` of the submodules of this `Module` (the entire
240
+ /// submodule hierarchy) and their keys, and if `include_self` is true, also
241
+ /// inserts a `shared_ptr` to this module in the first position. If
242
+ /// `name_prefix` is given, it is prepended to every key as
243
+ /// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
244
+ ///
245
+ /// \rst
246
+ /// .. warning::
247
+ /// Only pass `include_self` as `true` if this `Module` is stored in a
248
+ /// `shared_ptr`! Otherwise an exception will be thrown. You may still call
249
+ /// this method with `include_self` set to false if your `Module` is not
250
+ /// stored in a `shared_ptr`.
251
+ /// \endrst
252
+ OrderedDict<std::string, std::shared_ptr<Module>> named_modules(
253
+ const std::string& name_prefix = std::string(),
254
+ bool include_self = true) const;
255
+
256
+ /// Returns the direct submodules of this `Module`.
257
+ std::vector<std::shared_ptr<Module>> children() const;
258
+
259
+ /// Returns an `OrderedDict` of the direct submodules of this `Module` and
260
+ /// their keys.
261
+ OrderedDict<std::string, std::shared_ptr<Module>> named_children() const;
262
+
263
+ /// Enables "training" mode.
264
+ virtual void train(bool on = true);
265
+
266
+ /// Calls train(false) to enable "eval" mode.
267
+ /// Do not override this method, override `train()` instead.
268
+ void eval();
269
+
270
+ /// True if the module is in training mode.
271
+ ///
272
+ /// Every `Module` has a boolean associated with it that determines whether
273
+ /// the `Module` is currently in *training* mode (set via `.train()`) or in
274
+ /// *evaluation* (inference) mode (set via `.eval()`). This property is
275
+ /// exposed via `is_training()`, and may be used by the implementation of a
276
+ /// concrete module to modify its runtime behavior. See the `BatchNorm` or
277
+ /// `Dropout` modules for examples of `Module`s that use different code paths
278
+ /// depending on this property.
279
+ virtual bool is_training() const noexcept;
280
+
281
+ /// Recursively casts all parameters to the given `dtype` and `device`.
282
+ ///
283
+ /// If `non_blocking` is true and the source is in pinned memory and
284
+ /// destination is on the GPU or vice versa, the copy is performed
285
+ /// asynchronously with respect to the host. Otherwise, the argument has no
286
+ /// effect.
287
+ virtual void to(
288
+ torch::Device device,
289
+ torch::Dtype dtype,
290
+ bool non_blocking = false);
291
+
292
+ /// Recursively casts all parameters to the given dtype.
293
+ ///
294
+ /// If `non_blocking` is true and the source is in pinned memory and
295
+ /// destination is on the GPU or vice versa, the copy is performed
296
+ /// asynchronously with respect to the host. Otherwise, the argument has no
297
+ /// effect.
298
+ virtual void to(torch::Dtype dtype, bool non_blocking = false);
299
+
300
+ /// Recursively moves all parameters to the given device.
301
+ ///
302
+ /// If `non_blocking` is true and the source is in pinned memory and
303
+ /// destination is on the GPU or vice versa, the copy is performed
304
+ /// asynchronously with respect to the host. Otherwise, the argument has no
305
+ /// effect.
306
+ virtual void to(torch::Device device, bool non_blocking = false);
307
+
308
+ /// Recursively zeros out the `grad` value of each registered parameter.
309
+ virtual void zero_grad(bool set_to_none = true);
310
+
311
+ /// Attempts to cast this `Module` to the given `ModuleType`.
312
+ ///
313
+ /// This method is useful when calling `apply()`.
314
+ /// \rst
315
+ /// .. code-block:: cpp
316
+ ///
317
+ /// void initialize_weights(nn::Module& module) {
318
+ /// torch::NoGradGuard no_grad;
319
+ /// if (auto* linear = module.as<nn::Linear>()) {
320
+ /// linear->weight.normal_(0.0, 0.02);
321
+ /// }
322
+ /// }
323
+ ///
324
+ /// MyModule module;
325
+ /// module->apply(initialize_weights);
326
+ /// \endrst
327
+ template <typename ModuleType>
328
+ typename ModuleType::ContainedType* as() noexcept;
329
+
330
+ /// Attempts to cast this `Module` to the given `ModuleType`.
331
+ ///
332
+ /// This method is useful when calling `apply()`.
333
+ /// \rst
334
+ /// .. code-block:: cpp
335
+ /// void initialize_weights(nn::Module& module) {
336
+ /// torch::NoGradGuard no_grad;
337
+ /// if (auto* linear = module.as<nn::Linear>()) {
338
+ /// linear->weight.normal_(0.0, 0.02);
339
+ /// }
340
+ /// }
341
+ ///
342
+ /// MyModule module;
343
+ /// module->apply(initialize_weights);
344
+ /// \endrst
345
+ template <typename ModuleType>
346
+ const typename ModuleType::ContainedType* as() const noexcept;
347
+
348
+ /// Attempts to cast this `Module` to the given `ModuleType`.
349
+ ///
350
+ /// This method is useful when calling `apply()`.
351
+ /// \rst
352
+ /// .. code-block:: cpp
353
+ ///
354
+ /// void initialize_weights(nn::Module& module) {
355
+ /// torch::NoGradGuard no_grad;
356
+ /// if (auto* linear = module.as<nn::Linear>()) {
357
+ /// linear->weight.normal_(0.0, 0.02);
358
+ /// }
359
+ /// }
360
+ ///
361
+ /// MyModule module;
362
+ /// module.apply(initialize_weights);
363
+ /// \endrst
364
+ template <
365
+ typename ModuleType,
366
+ typename = torch::detail::disable_if_module_holder_t<ModuleType>>
367
+ ModuleType* as() noexcept;
368
+
369
+ /// Attempts to cast this `Module` to the given `ModuleType`.
370
+ ///
371
+ /// This method is useful when calling `apply()`.
372
+ /// \rst
373
+ /// .. code-block:: cpp
374
+ ///
375
+ /// void initialize_weights(nn::Module& module) {
376
+ /// torch::NoGradGuard no_grad;
377
+ /// if (auto* linear = module.as<nn::Linear>()) {
378
+ /// linear->weight.normal_(0.0, 0.02);
379
+ /// }
380
+ /// }
381
+ ///
382
+ /// MyModule module;
383
+ /// module.apply(initialize_weights);
384
+ /// \endrst
385
+ template <
386
+ typename ModuleType,
387
+ typename = torch::detail::disable_if_module_holder_t<ModuleType>>
388
+ const ModuleType* as() const noexcept;
389
+
390
+ /// Serializes the `Module` into the given `OutputArchive`.
391
+ ///
392
+ /// If the `Module` contains unserializable submodules (e.g.
393
+ /// `nn::Functional`), those submodules are skipped when serializing.
394
+ virtual void save(serialize::OutputArchive& archive) const;
395
+
396
+ /// Deserializes the `Module` from the given `InputArchive`.
397
+ ///
398
+ /// If the `Module` contains unserializable submodules (e.g.
399
+ /// `nn::Functional`), we don't check the existence of those submodules in the
400
+ /// `InputArchive` when deserializing.
401
+ virtual void load(serialize::InputArchive& archive);
402
+
403
+ /// Streams a pretty representation of the `Module` into the given `stream`.
404
+ /// By default, this representation will be the name of the module (taken from
405
+ /// `name()`), followed by a recursive pretty print of all of the `Module`'s
406
+ /// submodules.
407
+ ///
408
+ /// Override this method to change the pretty print. The input
409
+ /// `stream` should be returned from the method, to allow easy chaining.
410
+ virtual void pretty_print(std::ostream& stream) const;
411
+
412
+ /// Returns whether the `Module` is serializable.
413
+ virtual bool is_serializable() const;
414
+
415
+ /// Registers a parameter with this `Module`.
416
+ ///
417
+ /// A parameter should be any gradient-recording tensor used in the
418
+ /// implementation of your `Module`. Registering it makes it available to
419
+ /// methods such as `parameters()`, `clone()` or `to().`
420
+ ///
421
+ /// Note that registering an undefined Tensor (e.g.
422
+ /// `module.register_parameter("param", Tensor())`) is allowed, and is
423
+ /// equivalent to `module.register_parameter("param", None)` in Python API.
424
+ ///
425
+ /// \rst
426
+ /// .. code-block:: cpp
427
+ ///
428
+ /// MyModule::MyModule() {
429
+ /// weight_ = register_parameter("weight", torch::randn({A, B}));
430
+ /// }
431
+ /// \endrst
432
+ Tensor& register_parameter(
433
+ std::string name,
434
+ Tensor tensor,
435
+ bool requires_grad = true);
436
+
437
+ /// Registers a buffer with this `Module`.
438
+ ///
439
+ /// A buffer is intended to be state in your module that does not record
440
+ /// gradients, such as running statistics. Registering it makes it available
441
+ /// to methods such as `buffers()`, `clone()` or `to().
442
+ ///
443
+ /// \rst
444
+ /// .. code-block:: cpp
445
+ ///
446
+ /// MyModule::MyModule() {
447
+ /// mean_ = register_buffer("mean", torch::empty({num_features_}));
448
+ /// }
449
+ /// \endrst
450
+ Tensor& register_buffer(std::string name, Tensor tensor);
451
+
452
+ /// Registers a submodule with this `Module`.
453
+ ///
454
+ /// Registering a module makes it available to methods such as `modules()`,
455
+ /// `clone()` or `to()`.
456
+ ///
457
+ /// \rst
458
+ /// .. code-block:: cpp
459
+ ///
460
+ /// MyModule::MyModule() {
461
+ /// submodule_ = register_module("linear", torch::nn::Linear(3, 4));
462
+ /// }
463
+ /// \endrst
464
+ template <typename ModuleType>
465
+ std::shared_ptr<ModuleType> register_module(
466
+ std::string name,
467
+ std::shared_ptr<ModuleType> module);
468
+
469
+ /// Registers a submodule with this `Module`.
470
+ ///
471
+ /// This method deals with `ModuleHolder`s.
472
+ ///
473
+ /// Registering a module makes it available to methods such as `modules()`,
474
+ /// `clone()` or `to()`.
475
+ ///
476
+ /// \rst
477
+ /// .. code-block:: cpp
478
+ ///
479
+ /// MyModule::MyModule() {
480
+ /// submodule_ = register_module("linear", torch::nn::Linear(3, 4));
481
+ /// }
482
+ /// \endrst
483
+ template <typename ModuleType>
484
+ std::shared_ptr<ModuleType> register_module(
485
+ std::string name,
486
+ ModuleHolder<ModuleType> module_holder);
487
+
488
+ /// Replaces a registered submodule with this `Module`.
489
+ ///
490
+ /// This takes care of the registration, if you used submodule members, you
491
+ /// should
492
+ // assign the submodule as well, i.e. use as
493
+ /// module->submodule_ = module->replace_module("linear",
494
+ /// torch::nn::Linear(3, 4));
495
+ /// It only works when a module of the name is already registered.
496
+ ///
497
+ /// This is useful for replacing a module after initialization, e.g.
498
+ /// for finetuning.
499
+ template <typename ModuleType>
500
+ std::shared_ptr<ModuleType> replace_module(
501
+ const std::string& name,
502
+ std::shared_ptr<ModuleType> module);
503
+
504
+ /// Replaces a registered submodule with this `Module`.
505
+ /// This method deals with `ModuleHolder`s.
506
+ ///
507
+ /// This takes care of the registration, if you used submodule members, you
508
+ /// should
509
+ // assign the submodule as well, i.e. use as
510
+ /// module->submodule_ = module->replace_module("linear", linear_holder);
511
+ /// It only works when a module of the name is already registered.
512
+ ///
513
+ /// This is useful for replacing a module after initialization, e.g.
514
+ /// for finetuning.
515
+ template <typename ModuleType>
516
+ std::shared_ptr<ModuleType> replace_module(
517
+ const std::string& name,
518
+ ModuleHolder<ModuleType> module_holder);
519
+
520
+ /// Unregisters a submodule from this `Module`. If there is no such module
521
+ /// with `name` an exception is thrown.
522
+ void unregister_module(const std::string& name);
523
+
524
+ protected:
525
+ /// The following three functions allow a module with default arguments in its
526
+ /// forward method to be used in a Sequential module.
527
+ /// You should NEVER override these functions manually. Instead, you should
528
+ /// use the `FORWARD_HAS_DEFAULT_ARGS` macro.
529
+ virtual bool _forward_has_default_args() {
530
+ return false;
531
+ }
532
+
533
+ virtual unsigned int _forward_num_required_args() {
534
+ TORCH_CHECK(
535
+ false,
536
+ "torch::nn::Module subclass that has default arguments in `forward` method ",
537
+ "must override `_forward_num_required_args` method. Please use ",
538
+ "`FORWARD_HAS_DEFAULT_ARGS` macro to do so.");
539
+ }
540
+
541
+ virtual std::vector<AnyValue> _forward_populate_default_args(
542
+ std::vector<AnyValue>&& arguments) {
543
+ TORCH_CHECK(
544
+ false,
545
+ "torch::nn::Module subclass that has default arguments in `forward` method ",
546
+ "must override `_forward_populate_default_args` method. Please use ",
547
+ "`FORWARD_HAS_DEFAULT_ARGS` macro to do so.");
548
+ }
549
+
550
+ /// The registered parameters of this `Module`.
551
+ /// In order to access parameters_ in ParameterDict and ParameterList
552
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
553
+ OrderedDict<std::string, Tensor> parameters_;
554
+
555
+ private:
556
+ // Friend classes.
557
+
558
+ template <typename Derived>
559
+ friend class Cloneable;
560
+
561
+ template <typename ModuleType, typename... ArgumentTypes>
562
+ friend struct AnyModuleHolder;
563
+
564
+ /// Pretty prints the given `Module` into the `ostream`.
565
+ TORCH_API friend std::ostream& operator<<(
566
+ std::ostream& stream,
567
+ const nn::Module& module);
568
+
569
+ // data parallel using this method to configure gradient edges during the
570
+ // replicate step.
571
+ template <typename ModuleType>
572
+ friend void replicate_grad_edges(
573
+ const std::shared_ptr<Module>& module,
574
+ const std::vector<std::shared_ptr<ModuleType>>& replicas,
575
+ const std::vector<Device>& devices);
576
+
577
+ // Private methods.
578
+
579
+ /// Used in the implementation of `Cloneable`.
580
+ virtual void clone_(Module& other, const optional<Device>& device);
581
+
582
+ /// The implementation of the various `to()` methods.
583
+ template <typename... Ts>
584
+ void to_impl(Ts&&... ts);
585
+
586
+ /// Implements pretty printing the module hierarchy.
587
+ void pretty_print_recursive(
588
+ std::ostream& stream,
589
+ const std::string& indentation) const;
590
+
591
+ /// Applies the `function` to every submodule recursively, starting at this
592
+ /// `Module`'s children (thus not including the module itself).
593
+ void apply_to_submodules(
594
+ const NamedModulePointerApplyFunction& function,
595
+ const std::string& name_prefix = std::string()) const;
596
+
597
+ /// Returns a shared_ptr to `this` in a safe (checked) way.
598
+ std::shared_ptr<Module> shared_from_this_checked() const;
599
+
600
+ /// The registered buffers of this `Module`.
601
+ OrderedDict<std::string, Tensor> buffers_;
602
+
603
+ /// The registered (direct) submodules of this `Module`.
604
+ OrderedDict<std::string, std::shared_ptr<Module>> children_;
605
+
606
+ /// The module's name (e.g. "LSTM").
607
+ mutable optional<std::string> name_;
608
+
609
+ /// Whether the module is in training mode.
610
+ bool is_training_{true};
611
+ };
612
+
613
+ /// Serialize a `Module` pointer into an `OutputArchive`.
614
+ TORCH_API serialize::OutputArchive& operator<<(
615
+ serialize::OutputArchive& archive,
616
+ const std::shared_ptr<nn::Module>& module);
617
+
618
+ /// Deserializes a `Module` from an `InputArchive`.
619
+ TORCH_API serialize::InputArchive& operator>>(
620
+ serialize::InputArchive& archive,
621
+ const std::shared_ptr<nn::Module>& module);
622
+
623
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nn::Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
624
+
625
+ template <typename ModuleType>
626
+ typename ModuleType::ContainedType* Module::as() noexcept {
627
+ // Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for
628
+ // `Linear`, since `LinearImpl` inherits `nn::Module`.
629
+ return as<typename ModuleType::ContainedType>();
630
+ }
631
+
632
+ template <typename ModuleType>
633
+ const typename ModuleType::ContainedType* Module::as() const noexcept {
634
+ // Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for
635
+ // `Linear`, since `LinearImpl` inherits `nn::Module`.
636
+ return as<typename ModuleType::ContainedType>();
637
+ }
638
+
639
+ template <typename ModuleType, typename>
640
+ ModuleType* Module::as() noexcept {
641
+ return dynamic_cast<ModuleType*>(this);
642
+ }
643
+
644
+ template <typename ModuleType, typename>
645
+ const ModuleType* Module::as() const noexcept {
646
+ return dynamic_cast<const ModuleType*>(this);
647
+ }
648
+
649
+ template <typename ModuleType>
650
+ std::shared_ptr<ModuleType> Module::register_module(
651
+ std::string name,
652
+ std::shared_ptr<ModuleType> module) {
653
+ TORCH_CHECK(!name.empty(), "Submodule name must not be empty");
654
+ TORCH_CHECK(
655
+ name.find('.') == std::string::npos,
656
+ "Submodule name must not contain a dot (got '",
657
+ name,
658
+ "')");
659
+ auto& base_module = children_.insert(std::move(name), std::move(module));
660
+ return std::dynamic_pointer_cast<ModuleType>(base_module);
661
+ }
662
+
663
+ template <typename ModuleType>
664
+ std::shared_ptr<ModuleType> Module::register_module(
665
+ std::string name,
666
+ ModuleHolder<ModuleType> module_holder) {
667
+ return register_module(std::move(name), module_holder.ptr());
668
+ }
669
+
670
+ template <typename ModuleType>
671
+ std::shared_ptr<ModuleType> Module::replace_module(
672
+ const std::string& name,
673
+ std::shared_ptr<ModuleType> module) {
674
+ auto& base_module = (children_[name] = std::move(module));
675
+ return std::dynamic_pointer_cast<ModuleType>(base_module);
676
+ }
677
+
678
+ template <typename ModuleType>
679
+ std::shared_ptr<ModuleType> Module::replace_module(
680
+ const std::string& name,
681
+ ModuleHolder<ModuleType> module_holder) {
682
+ return replace_module(name, module_holder.ptr());
683
+ }
684
+
685
+ template <typename... Ts>
686
+ void Module::to_impl(Ts&&... ts) {
687
+ // First call `to()` on every child module.
688
+ for (auto& child : children_) {
689
+ child.value()->to(ts...);
690
+ }
691
+ // Then move every parameter to the new dtype/device.
692
+ for (auto& parameter : named_parameters(/*recurse=*/false)) {
693
+ parameter->set_data(autograd::Variable(*parameter).to(ts...));
694
+ }
695
+ // Then move every buffer to the new dtype/device.
696
+ for (auto& buffer : named_buffers(/*recurse=*/false)) {
697
+ buffer->set_data(autograd::Variable(*buffer).to(ts...));
698
+ }
699
+ }
700
+
701
+ } // namespace nn
702
+ } // namespace torch
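A compact sketch of the registration API documented above; the module name, layer sizes and the extra parameter are illustrative only:

#include <torch/torch.h>
#include <iostream>

struct NetImpl : torch::nn::Module {
  NetImpl() {
    // register_module()/register_parameter() make these members visible to
    // parameters(), to(), clone() and the other batch-configuration methods.
    fc1 = register_module("fc1", torch::nn::Linear(784, 64));
    fc2 = register_module("fc2", torch::nn::Linear(64, 10));
    scale = register_parameter("scale", torch::ones({1}));
  }

  torch::Tensor forward(torch::Tensor x) {
    return fc2->forward(torch::relu(fc1->forward(x))) * scale;
  }

  torch::nn::Linear fc1{nullptr}, fc2{nullptr};
  torch::Tensor scale;
};
TORCH_MODULE(Net);

int main() {
  Net net;
  for (const auto& p : net->named_parameters()) {
    std::cout << p.key() << ": " << p.value().sizes() << std::endl;
  }
  net->to(torch::kFloat64);  // batch-configures all registered tensors
  net->eval();
}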
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h ADDED
@@ -0,0 +1,36 @@
1
+ #pragma once
2
+
3
+ // Common
4
+ #include <torch/nn/modules/common.h>
5
+
6
+ // Containers
7
+ #include <torch/nn/modules/container/any.h>
8
+ #include <torch/nn/modules/container/functional.h>
9
+ #include <torch/nn/modules/container/moduledict.h>
10
+ #include <torch/nn/modules/container/modulelist.h>
11
+ #include <torch/nn/modules/container/named_any.h>
12
+ #include <torch/nn/modules/container/parameterdict.h>
13
+ #include <torch/nn/modules/container/parameterlist.h>
14
+ #include <torch/nn/modules/container/sequential.h>
15
+
16
+ // Layers
17
+ #include <torch/nn/modules/activation.h>
18
+ #include <torch/nn/modules/adaptive.h>
19
+ #include <torch/nn/modules/batchnorm.h>
20
+ #include <torch/nn/modules/conv.h>
21
+ #include <torch/nn/modules/distance.h>
22
+ #include <torch/nn/modules/dropout.h>
23
+ #include <torch/nn/modules/embedding.h>
24
+ #include <torch/nn/modules/fold.h>
25
+ #include <torch/nn/modules/instancenorm.h>
26
+ #include <torch/nn/modules/linear.h>
27
+ #include <torch/nn/modules/loss.h>
28
+ #include <torch/nn/modules/normalization.h>
29
+ #include <torch/nn/modules/padding.h>
30
+ #include <torch/nn/modules/pixelshuffle.h>
31
+ #include <torch/nn/modules/pooling.h>
32
+ #include <torch/nn/modules/rnn.h>
33
+ #include <torch/nn/modules/transformer.h>
34
+ #include <torch/nn/modules/transformercoder.h>
35
+ #include <torch/nn/modules/transformerlayer.h>
36
+ #include <torch/nn/modules/upsampling.h>
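The containers and layers pulled in by this umbrella header compose directly; a small illustrative stack (the architecture itself is arbitrary):

#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::Sequential model(
      torch::nn::Conv2d(torch::nn::Conv2dOptions(3, 16, 3).padding(1)),
      torch::nn::BatchNorm2d(16),
      torch::nn::ReLU(),
      torch::nn::AdaptiveAvgPool2d(torch::nn::AdaptiveAvgPool2dOptions({1, 1})),
      torch::nn::Flatten(),
      torch::nn::Linear(16, 10));

  auto out = model->forward(torch::rand({2, 3, 32, 32}));
  std::cout << out.sizes() << std::endl;  // [2, 10]
}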
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/_functions.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/custom_function.h>
4
+ #include <torch/csrc/autograd/variable.h>
5
+ #include <torch/nn/options/normalization.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+ namespace functions {
11
+
12
+ class CrossMapLRN2d : public torch::autograd::Function<CrossMapLRN2d> {
13
+ public:
14
+ static torch::autograd::Variable forward(
15
+ torch::autograd::AutogradContext* ctx,
16
+ const torch::autograd::Variable& input,
17
+ const CrossMapLRN2dOptions& options);
18
+
19
+ static torch::autograd::variable_list backward(
20
+ torch::autograd::AutogradContext* ctx,
21
+ torch::autograd::variable_list grad_output);
22
+ };
23
+
24
+ } // namespace functions
25
+ } // namespace nn
26
+ } // namespace torch
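`CrossMapLRN2d` above follows the standard `torch::autograd::Function` pattern of paired static `forward`/`backward` methods operating on an `AutogradContext`. A minimal sketch of that pattern with an unrelated, purely illustrative op (a squaring function, not the LRN implementation):

#include <torch/torch.h>

// Same structural shape as CrossMapLRN2d: static forward/backward on an
// AutogradContext, with saved tensors carried between the two.
struct Square : torch::autograd::Function<Square> {
  static torch::autograd::Variable forward(
      torch::autograd::AutogradContext* ctx,
      const torch::autograd::Variable& input) {
    ctx->save_for_backward({input});
    return input * input;
  }

  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext* ctx,
      torch::autograd::variable_list grad_output) {
    auto saved = ctx->get_saved_variables();
    return {2 * saved[0] * grad_output[0]};
  }
};

int main() {
  auto x = torch::rand({4}, torch::requires_grad());
  Square::apply(x).sum().backward();
}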
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/activation.h ADDED
@@ -0,0 +1,875 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/activation.h>
5
+ #include <torch/nn/modules/common.h>
6
+ #include <torch/nn/modules/linear.h>
7
+ #include <torch/nn/options/activation.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+
11
+ namespace torch {
12
+ namespace nn {
13
+
14
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
15
+
16
+ /// Applies elu over a given input.
17
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ELU to learn
18
+ /// about the exact behavior of this module.
19
+ ///
20
+ /// See the documentation for `torch::nn::ELUOptions` class to learn what
21
+ /// constructor arguments are supported for this module.
22
+ ///
23
+ /// Example:
24
+ /// ```
25
+ /// ELU model(ELUOptions().alpha(42.42).inplace(true));
26
+ /// ```
27
+ class TORCH_API ELUImpl : public torch::nn::Cloneable<ELUImpl> {
28
+ public:
29
+ explicit ELUImpl(const ELUOptions& options_ = {});
30
+
31
+ Tensor forward(Tensor input);
32
+
33
+ void reset() override;
34
+
35
+ /// Pretty prints the `ELU` module into the given `stream`.
36
+ void pretty_print(std::ostream& stream) const override;
37
+
38
+ /// The options with which this `Module` was constructed.
39
+ ELUOptions options;
40
+ };
41
+
42
+ /// A `ModuleHolder` subclass for `ELUImpl`.
43
+ /// See the documentation for `ELUImpl` class to learn what methods it
44
+ /// provides, and examples of how to use `ELU` with `torch::nn::ELUOptions`.
45
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
46
+ /// module storage semantics.
47
+ TORCH_MODULE(ELU);
48
+
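A one-line sketch of the `ELU` module above in use; the alpha value and input shape are illustrative:

#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::ELU elu(torch::nn::ELUOptions().alpha(1.0));
  auto y = elu->forward(torch::randn({2, 3}));
  std::cout << y << std::endl;
}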
49
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
50
+
51
+ /// Applies the selu function element-wise.
52
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.SELU to learn
53
+ /// about the exact behavior of this module.
54
+ ///
55
+ /// See the documentation for `torch::nn::SELUOptions` class to learn what
56
+ /// constructor arguments are supported for this module.
57
+ ///
58
+ /// Example:
59
+ /// ```
60
+ /// SELU model(SELUOptions().inplace(true));
61
+ /// ```
62
+ class TORCH_API SELUImpl : public torch::nn::Cloneable<SELUImpl> {
63
+ public:
64
+ explicit SELUImpl(const SELUOptions& options_ = {});
65
+
66
+ Tensor forward(Tensor input);
67
+
68
+ void reset() override;
69
+
70
+ /// Pretty prints the `SELU` module into the given `stream`.
71
+ void pretty_print(std::ostream& stream) const override;
72
+
73
+ /// The options with which this `Module` was constructed.
74
+ SELUOptions options;
75
+ };
76
+
77
+ /// A `ModuleHolder` subclass for `SELUImpl`.
78
+ /// See the documentation for `SELUImpl` class to learn what methods it
79
+ /// provides, and examples of how to use `SELU` with `torch::nn::SELUOptions`.
80
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
81
+ /// module storage semantics.
82
+ TORCH_MODULE(SELU);
83
+
84
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
85
+
86
+ /// Applies the hard shrinkage function element-wise.
87
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Hardshrink to learn
88
+ /// about the exact behavior of this module.
89
+ ///
90
+ /// See the documentation for `torch::nn::HardshrinkOptions` class to learn what
91
+ /// constructor arguments are supported for this module.
92
+ ///
93
+ /// Example:
94
+ /// ```
95
+ /// Hardshrink model(HardshrinkOptions().lambda(42.42));
96
+ /// ```
97
+ class TORCH_API HardshrinkImpl : public torch::nn::Cloneable<HardshrinkImpl> {
98
+ public:
99
+ explicit HardshrinkImpl(const HardshrinkOptions& options_ = {});
100
+
101
+ Tensor forward(const Tensor& input);
102
+
103
+ void reset() override;
104
+
105
+ /// Pretty prints the `Hardshrink` module into the given `stream`.
106
+ void pretty_print(std::ostream& stream) const override;
107
+
108
+ /// The options with which this `Module` was constructed.
109
+ HardshrinkOptions options;
110
+ };
111
+
112
+ /// A `ModuleHolder` subclass for `HardshrinkImpl`.
113
+ /// See the documentation for `HardshrinkImpl` class to learn what methods it
114
+ /// provides, and examples of how to use `Hardshrink` with
115
+ /// `torch::nn::HardshrinkOptions`. See the documentation for `ModuleHolder` to
116
+ /// learn about PyTorch's module storage semantics.
117
+ TORCH_MODULE(Hardshrink);
118
+
119
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardtanh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
120
+
121
+ /// Applies the HardTanh function element-wise.
122
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Hardtanh to learn
123
+ /// about the exact behavior of this module.
124
+ ///
125
+ /// See the documentation for `torch::nn::HardtanhOptions` class to learn what
126
+ /// constructor arguments are supported for this module.
127
+ ///
128
+ /// Example:
129
+ /// ```
130
+ /// Hardtanh
131
+ /// model(HardtanhOptions().min_val(-42.42).max_val(0.42).inplace(true));
132
+ /// ```
133
+ class TORCH_API HardtanhImpl : public torch::nn::Cloneable<HardtanhImpl> {
134
+ public:
135
+ explicit HardtanhImpl(const HardtanhOptions& options_ = {});
136
+
137
+ Tensor forward(Tensor input);
138
+
139
+ void reset() override;
140
+
141
+ /// Pretty prints the `Hardtanh` module into the given `stream`.
142
+ void pretty_print(std::ostream& stream) const override;
143
+
144
+ /// The options with which this `Module` was constructed.
145
+ HardtanhOptions options;
146
+ };
147
+
148
+ /// A `ModuleHolder` subclass for `HardtanhImpl`.
149
+ /// See the documentation for `HardtanhImpl` class to learn what methods it
150
+ /// provides, and examples of how to use `Hardtanh` with
151
+ /// `torch::nn::HardtanhOptions`. See the documentation for `ModuleHolder` to
152
+ /// learn about PyTorch's module storage semantics.
153
+ TORCH_MODULE(Hardtanh);
154
+
155
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LeakyReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
156
+
157
+ /// Applies the LeakyReLU function element-wise.
158
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.LeakyReLU to learn
159
+ /// about the exact behavior of this module.
160
+ ///
161
+ /// See the documentation for `torch::nn::LeakyReLUOptions` class to learn what
162
+ /// constructor arguments are supported for this module.
163
+ ///
164
+ /// Example:
165
+ /// ```
166
+ /// LeakyReLU model(LeakyReLUOptions().negative_slope(0.42).inplace(true));
167
+ /// ```
168
+ class TORCH_API LeakyReLUImpl : public torch::nn::Cloneable<LeakyReLUImpl> {
169
+ public:
170
+ explicit LeakyReLUImpl(const LeakyReLUOptions& options_ = {});
171
+
172
+ Tensor forward(Tensor input);
173
+
174
+ void reset() override;
175
+
176
+ /// Pretty prints the `LeakyReLU` module into the given `stream`.
177
+ void pretty_print(std::ostream& stream) const override;
178
+
179
+ /// The options with which this `Module` was constructed.
180
+ LeakyReLUOptions options;
181
+ };
182
+
183
+ /// A `ModuleHolder` subclass for `LeakyReLUImpl`.
184
+ /// See the documentation for `LeakyReLUImpl` class to learn what methods it
185
+ /// provides, and examples of how to use `LeakyReLU` with
186
+ /// `torch::nn::LeakyReLUOptions`. See the documentation for `ModuleHolder` to
187
+ /// learn about PyTorch's module storage semantics.
188
+ TORCH_MODULE(LeakyReLU);
189
+
190
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LogSigmoid ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
191
+
192
+ /// Applies the LogSigmoid function element-wise.
193
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.LogSigmoid to learn
194
+ /// about the exact behavior of this module.
195
+ class TORCH_API LogSigmoidImpl : public torch::nn::Cloneable<LogSigmoidImpl> {
196
+ public:
197
+ Tensor forward(const Tensor& input);
198
+
199
+ void reset() override;
200
+
201
+ /// Pretty prints the `LogSigmoid` module into the given `stream`.
202
+ void pretty_print(std::ostream& stream) const override;
203
+ };
204
+
205
+ /// A `ModuleHolder` subclass for `LogSigmoidImpl`.
206
+ /// See the documentation for `LogSigmoidImpl` class to learn what methods it
207
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
208
+ /// module storage semantics.
209
+ TORCH_MODULE(LogSigmoid);
210
+
211
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
212
+
213
+ /// Applies the Softmax function.
214
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softmax to learn
215
+ /// about the exact behavior of this module.
216
+ ///
217
+ /// See the documentation for `torch::nn::SoftmaxOptions` class to learn what
218
+ /// constructor arguments are supported for this module.
219
+ ///
220
+ /// Example:
221
+ /// ```
222
+ /// Softmax model(SoftmaxOptions(1));
223
+ /// ```
224
+ class TORCH_API SoftmaxImpl : public torch::nn::Cloneable<SoftmaxImpl> {
225
+ public:
226
+ explicit SoftmaxImpl(int64_t dim) : SoftmaxImpl(SoftmaxOptions(dim)) {}
227
+ explicit SoftmaxImpl(const SoftmaxOptions& options_);
228
+
229
+ Tensor forward(const Tensor& input);
230
+
231
+ void reset() override;
232
+
233
+ /// Pretty prints the `Softmax` module into the given `stream`.
234
+ void pretty_print(std::ostream& stream) const override;
235
+
236
+ SoftmaxOptions options;
237
+ };
238
+
239
+ /// A `ModuleHolder` subclass for `SoftmaxImpl`.
240
+ /// See the documentation for `SoftmaxImpl` class to learn what methods it
241
+ /// provides, and examples of how to use `Softmax` with
242
+ /// `torch::nn::SoftmaxOptions`. See the documentation for `ModuleHolder` to
243
+ /// learn about PyTorch's module storage semantics.
244
+ TORCH_MODULE(Softmax);
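A short sketch of the softmax module in use (assumes libtorch; shapes are illustrative):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  // Normalize over dimension 1, i.e. the class dimension of a [batch, classes] tensor.
  torch::nn::Softmax softmax(torch::nn::SoftmaxOptions(1));
  torch::Tensor logits = torch::randn({4, 10});
  torch::Tensor probs = softmax(logits);
  std::cout << probs.sum(/*dim=*/1) << '\n';  // each row sums to 1
}
```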
245
+
246
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmin ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
247
+
248
+ /// Applies the Softmin function element-wise.
249
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softmin to learn
250
+ /// about the exact behavior of this module.
251
+ ///
252
+ /// See the documentation for `torch::nn::SoftminOptions` class to learn what
253
+ /// constructor arguments are supported for this module.
254
+ ///
255
+ /// Example:
256
+ /// ```
257
+ /// Softmin model(SoftminOptions(1));
258
+ /// ```
259
+ class TORCH_API SoftminImpl : public torch::nn::Cloneable<SoftminImpl> {
260
+ public:
261
+ explicit SoftminImpl(int64_t dim) : SoftminImpl(SoftminOptions(dim)) {}
262
+ explicit SoftminImpl(const SoftminOptions& options_);
263
+
264
+ Tensor forward(const Tensor& input);
265
+
266
+ void reset() override;
267
+
268
+ /// Pretty prints the `Softmin` module into the given `stream`.
269
+ void pretty_print(std::ostream& stream) const override;
270
+
271
+ SoftminOptions options;
272
+ };
273
+
274
+ /// A `ModuleHolder` subclass for `SoftminImpl`.
275
+ /// See the documentation for `SoftminImpl` class to learn what methods it
276
+ /// provides, and examples of how to use `Softmin` with
277
+ /// `torch::nn::SoftminOptions`. See the documentation for `ModuleHolder` to
278
+ /// learn about PyTorch's module storage semantics.
279
+ TORCH_MODULE(Softmin);
280
+
281
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LogSoftmax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
282
+
283
+ /// Applies the LogSoftmax function element-wise.
284
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.LogSoftmax to learn
285
+ /// about the exact behavior of this module.
286
+ ///
287
+ /// See the documentation for `torch::nn::LogSoftmaxOptions` class to learn what
288
+ /// constructor arguments are supported for this module.
289
+ ///
290
+ /// Example:
291
+ /// ```
292
+ /// LogSoftmax model(LogSoftmaxOptions(1));
293
+ /// ```
294
+ class TORCH_API LogSoftmaxImpl : public torch::nn::Cloneable<LogSoftmaxImpl> {
295
+ public:
296
+ explicit LogSoftmaxImpl(int64_t dim)
297
+ : LogSoftmaxImpl(LogSoftmaxOptions(dim)) {}
298
+ explicit LogSoftmaxImpl(const LogSoftmaxOptions& options_);
299
+
300
+ Tensor forward(const Tensor& input);
301
+
302
+ void reset() override;
303
+
304
+ /// Pretty prints the `LogSoftmax` module into the given `stream`.
305
+ void pretty_print(std::ostream& stream) const override;
306
+
307
+ LogSoftmaxOptions options;
308
+ };
309
+
310
+ /// A `ModuleHolder` subclass for `LogSoftmaxImpl`.
311
+ /// See the documentation for `LogSoftmaxImpl` class to learn what methods it
312
+ /// provides, and examples of how to use `LogSoftmax` with
313
+ /// `torch::nn::LogSoftmaxOptions`. See the documentation for `ModuleHolder` to
314
+ /// learn about PyTorch's module storage semantics.
315
+ TORCH_MODULE(LogSoftmax);
316
+
317
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmax2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
318
+
319
+ /// Applies the Softmax2d function element-wise.
320
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softmax2d to learn
321
+ /// about the exact behavior of this module.
322
+ class TORCH_API Softmax2dImpl : public torch::nn::Cloneable<Softmax2dImpl> {
323
+ public:
324
+ Tensor forward(const Tensor& input);
325
+
326
+ void reset() override;
327
+
328
+ /// Pretty prints the `Softmax2d` module into the given `stream`.
329
+ void pretty_print(std::ostream& stream) const override;
330
+ };
331
+
332
+ /// A `ModuleHolder` subclass for `Softmax2dImpl`.
333
+ /// See the documentation for `Softmax2dImpl` class to learn what methods it
334
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
335
+ /// module storage semantics.
336
+ TORCH_MODULE(Softmax2d);
337
+
338
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
339
+
340
+ /// Applies the PReLU function element-wise.
341
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.PReLU to learn
342
+ /// about the exact behavior of this module.
343
+ ///
344
+ /// See the documentation for `torch::nn::PReLUOptions` class to learn what
345
+ /// constructor arguments are supported for this module.
346
+ ///
347
+ /// Example:
348
+ /// ```
349
+ /// PReLU model(PReLUOptions().num_parameters(42));
350
+ /// ```
351
+ class TORCH_API PReLUImpl : public torch::nn::Cloneable<PReLUImpl> {
352
+ public:
353
+ explicit PReLUImpl(const PReLUOptions& options_ = {});
354
+
355
+ Tensor forward(const Tensor& input);
356
+
357
+ void reset() override;
358
+
359
+ /// Pretty prints the `PReLU` module into the given `stream`.
360
+ void pretty_print(std::ostream& stream) const override;
361
+
362
+ /// The options with which this `Module` was constructed.
363
+ PReLUOptions options;
364
+
365
+ /// The learned weight.
366
+ Tensor weight;
367
+ };
368
+
369
+ /// A `ModuleHolder` subclass for `PReLUImpl`.
370
+ /// See the documentation for `PReLUImpl` class to learn what methods it
371
+ /// provides, and examples of how to use `PReLU` with `torch::nn::PReLUOptions`.
372
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
373
+ /// module storage semantics.
374
+ TORCH_MODULE(PReLU);
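A sketch showing that the PReLU slope is a learnable parameter (assumes libtorch; shapes are illustrative):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  // One learnable negative-slope parameter per channel (3 channels here).
  torch::nn::PReLU prelu(torch::nn::PReLUOptions().num_parameters(3));
  torch::Tensor x = torch::randn({2, 3, 8, 8});
  torch::Tensor y = prelu(x);
  // `weight` is registered as a parameter, so it is visible to optimizers.
  std::cout << prelu->weight.sizes() << '\n';  // [3]
}
```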
375
+
376
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
377
+
378
+ /// Applies the ReLU function element-wise.
379
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReLU to learn
380
+ /// about the exact behavior of this module.
381
+ ///
382
+ /// See the documentation for `torch::nn::ReLUOptions` class to learn what
383
+ /// constructor arguments are supported for this module.
384
+ ///
385
+ /// Example:
386
+ /// ```
387
+ /// ReLU model(ReLUOptions().inplace(true));
388
+ /// ```
389
+ class TORCH_API ReLUImpl : public torch::nn::Cloneable<ReLUImpl> {
390
+ public:
391
+ explicit ReLUImpl(const ReLUOptions& options_ = {});
392
+
393
+ Tensor forward(Tensor input);
394
+
395
+ void reset() override;
396
+
397
+ /// Pretty prints the `ReLU` module into the given `stream`.
398
+ void pretty_print(std::ostream& stream) const override;
399
+
400
+ /// The options with which this `Module` was constructed.
401
+ ReLUOptions options;
402
+ };
403
+
404
+ /// A `ModuleHolder` subclass for `ReLUImpl`.
405
+ /// See the documentation for `ReLUImpl` class to learn what methods it
406
+ /// provides, and examples of how to use `ReLU` with `torch::nn::ReLUOptions`.
407
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
408
+ /// module storage semantics.
409
+ TORCH_MODULE(ReLU);
410
+
411
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReLU6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
412
+
413
+ /// Applies the ReLU6 function element-wise.
414
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReLU6 to learn
415
+ /// about the exact behavior of this module.
416
+ ///
417
+ /// See the documentation for `torch::nn::ReLU6Options` class to learn what
418
+ /// constructor arguments are supported for this module.
419
+ ///
420
+ /// Example:
421
+ /// ```
422
+ /// ReLU6 model(ReLU6Options().inplace(true));
423
+ /// ```
424
+ class TORCH_API ReLU6Impl : public torch::nn::Cloneable<ReLU6Impl> {
425
+ public:
426
+ explicit ReLU6Impl(const ReLU6Options& options_ = {});
427
+
428
+ Tensor forward(Tensor input);
429
+
430
+ void reset() override;
431
+
432
+ /// Pretty prints the `ReLU6` module into the given `stream`.
433
+ void pretty_print(std::ostream& stream) const override;
434
+
435
+ /// The options with which this `Module` was constructed.
436
+ ReLU6Options options;
437
+ };
438
+
439
+ /// A `ModuleHolder` subclass for `ReLU6Impl`.
440
+ /// See the documentation for `ReLU6Impl` class to learn what methods it
441
+ /// provides, and examples of how to use `ReLU6` with `torch::nn::ReLU6Options`.
442
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
443
+ /// module storage semantics.
444
+ TORCH_MODULE(ReLU6);
445
+
446
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
447
+
448
+ /// Applies the RReLU function element-wise.
449
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.RReLU to learn
450
+ /// about the exact behavior of this module.
451
+ ///
452
+ /// See the documentation for `torch::nn::RReLUOptions` class to learn what
453
+ /// constructor arguments are supported for this module.
454
+ ///
455
+ /// Example:
456
+ /// ```
457
+ /// RReLU model(RReLUOptions().lower(0.24).upper(0.42).inplace(true));
458
+ /// ```
459
+ class TORCH_API RReLUImpl : public torch::nn::Cloneable<RReLUImpl> {
460
+ public:
461
+ explicit RReLUImpl(const RReLUOptions& options_ = {});
462
+
463
+ Tensor forward(Tensor input);
464
+
465
+ void reset() override;
466
+
467
+ /// Pretty prints the `RReLU` module into the given `stream`.
468
+ void pretty_print(std::ostream& stream) const override;
469
+
470
+ /// The options with which this `Module` was constructed.
471
+ RReLUOptions options;
472
+ };
473
+
474
+ /// A `ModuleHolder` subclass for `RReLUImpl`.
475
+ /// See the documentation for `RReLUImpl` class to learn what methods it
476
+ /// provides, and examples of how to use `RReLU` with `torch::nn::RReLUOptions`.
477
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
478
+ /// module storage semantics.
479
+ TORCH_MODULE(RReLU);
480
+
481
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
482
+
483
+ /// Applies celu over a given input.
484
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.CELU to learn
485
+ /// about the exact behavior of this module.
486
+ ///
487
+ /// See the documentation for `torch::nn::CELUOptions` class to learn what
488
+ /// constructor arguments are supported for this module.
489
+ ///
490
+ /// Example:
491
+ /// ```
492
+ /// CELU model(CELUOptions().alpha(42.42).inplace(true));
493
+ /// ```
494
+ class TORCH_API CELUImpl : public torch::nn::Cloneable<CELUImpl> {
495
+ public:
496
+ explicit CELUImpl(const CELUOptions& options_ = {});
497
+
498
+ Tensor forward(Tensor input);
499
+
500
+ void reset() override;
501
+
502
+ /// Pretty prints the `CELU` module into the given `stream`.
503
+ void pretty_print(std::ostream& stream) const override;
504
+
505
+ /// The options with which this `Module` was constructed.
506
+ CELUOptions options;
507
+ };
508
+
509
+ /// A `ModuleHolder` subclass for `CELUImpl`.
510
+ /// See the documentation for `CELUImpl` class to learn what methods it
511
+ /// provides, and examples of how to use `CELU` with `torch::nn::CELUOptions`.
512
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
513
+ /// module storage semantics.
514
+ TORCH_MODULE(CELU);
515
+
516
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
517
+
518
+ /// Applies glu over a given input.
519
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.GLU to learn
520
+ /// about the exact behavior of this module.
521
+ ///
522
+ /// See the documentation for `torch::nn::GLUOptions` class to learn what
523
+ /// constructor arguments are supported for this module.
524
+ ///
525
+ /// Example:
526
+ /// ```
527
+ /// GLU model(GLUOptions(1));
528
+ /// ```
529
+ class TORCH_API GLUImpl : public torch::nn::Cloneable<GLUImpl> {
530
+ public:
531
+ explicit GLUImpl(const GLUOptions& options_ = {});
532
+
533
+ Tensor forward(const Tensor& input);
534
+
535
+ void reset() override;
536
+
537
+ /// Pretty prints the `GLU` module into the given `stream`.
538
+ void pretty_print(std::ostream& stream) const override;
539
+
540
+ /// The options with which this `Module` was constructed.
541
+ GLUOptions options;
542
+ };
543
+
544
+ /// A `ModuleHolder` subclass for `GLUImpl`.
545
+ /// See the documentation for `GLUImpl` class to learn what methods it
546
+ /// provides, and examples of how to use `GLU` with `torch::nn::GLUOptions`.
547
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
548
+ /// module storage semantics.
549
+ TORCH_MODULE(GLU);
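A sketch of the gated linear unit; note that GLU halves the chosen dimension (assumes libtorch):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  // The input is split in half along dim 1 into (a, b); the result is a * sigmoid(b).
  torch::nn::GLU glu(torch::nn::GLUOptions(/*dim=*/1));
  torch::Tensor x = torch::randn({3, 8});
  torch::Tensor y = glu(x);
  std::cout << y.sizes() << '\n';  // [3, 4]
}
```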
550
+
551
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
552
+
553
+ /// Applies gelu over a given input.
554
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.GELU to learn
555
+ /// about the exact behavior of this module.
556
+ class TORCH_API GELUImpl : public torch::nn::Cloneable<GELUImpl> {
557
+ public:
558
+ explicit GELUImpl(GELUOptions options_ = {});
559
+
560
+ Tensor forward(const Tensor& input);
561
+
562
+ void reset() override;
563
+
564
+ /// Pretty prints the `GELU` module into the given `stream`.
565
+ void pretty_print(std::ostream& stream) const override;
566
+
567
+ /// The options with which this `Module` was constructed.
568
+ GELUOptions options;
569
+ };
570
+
571
+ /// A `ModuleHolder` subclass for `GELUImpl`.
572
+ /// See the documentation for `GELUImpl` class to learn what methods it
573
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
574
+ /// module storage semantics.
575
+ TORCH_MODULE(GELU);
576
+
577
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SiLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
578
+
579
+ /// Applies silu over a given input.
580
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.SiLU to learn
581
+ /// about the exact behavior of this module.
582
+ class TORCH_API SiLUImpl : public torch::nn::Cloneable<SiLUImpl> {
583
+ public:
584
+ Tensor forward(const Tensor& input);
585
+
586
+ void reset() override;
587
+
588
+ /// Pretty prints the `SiLU` module into the given `stream`.
589
+ void pretty_print(std::ostream& stream) const override;
590
+ };
591
+
592
+ /// A `ModuleHolder` subclass for `SiLUImpl`.
593
+ /// See the documentation for `SiLUImpl` class to learn what methods it
594
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
595
+ /// module storage semantics.
596
+ TORCH_MODULE(SiLU);
597
+
598
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Mish ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
599
+
600
+ /// Applies mish over a given input.
601
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Mish to learn
602
+ /// about the exact behavior of this module.
603
+ class TORCH_API MishImpl : public torch::nn::Cloneable<MishImpl> {
604
+ public:
605
+ Tensor forward(const Tensor& input);
606
+
607
+ void reset() override;
608
+
609
+ /// Pretty prints the `Mish` module into the given `stream`.
610
+ void pretty_print(std::ostream& stream) const override;
611
+ };
612
+
613
+ /// A `ModuleHolder` subclass for `MishImpl`.
614
+ /// See the documentation for `MishImpl` class to learn what methods it
615
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
616
+ /// module storage semantics.
617
+ TORCH_MODULE(Mish);
618
+
619
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sigmoid ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
620
+
621
+ /// Applies sigmoid over a given input.
622
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Sigmoid to learn
623
+ /// about the exact behavior of this module.
624
+ class TORCH_API SigmoidImpl : public torch::nn::Cloneable<SigmoidImpl> {
625
+ public:
626
+ Tensor forward(const Tensor& input);
627
+
628
+ void reset() override;
629
+
630
+ /// Pretty prints the `Sigmoid` module into the given `stream`.
631
+ void pretty_print(std::ostream& stream) const override;
632
+ };
633
+
634
+ /// A `ModuleHolder` subclass for `SigmoidImpl`.
635
+ /// See the documentation for `SigmoidImpl` class to learn what methods it
636
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
637
+ /// module storage semantics.
638
+ TORCH_MODULE(Sigmoid);
639
+
640
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softplus ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
641
+
642
+ /// Applies softplus over a given input.
643
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softplus to learn
644
+ /// about the exact behavior of this module.
645
+ ///
646
+ /// See the documentation for `torch::nn::SoftplusOptions` class to learn what
647
+ /// constructor arguments are supported for this module.
648
+ ///
649
+ /// Example:
650
+ /// ```
651
+ /// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42));
652
+ /// ```
653
+ class TORCH_API SoftplusImpl : public torch::nn::Cloneable<SoftplusImpl> {
654
+ public:
655
+ explicit SoftplusImpl(const SoftplusOptions& options_ = {});
656
+
657
+ Tensor forward(const Tensor& input);
658
+
659
+ void reset() override;
660
+
661
+ /// Pretty prints the `Softplus` module into the given `stream`.
662
+ void pretty_print(std::ostream& stream) const override;
663
+
664
+ /// The options with which this `Module` was constructed.
665
+ SoftplusOptions options;
666
+ };
667
+
668
+ /// A `ModuleHolder` subclass for `SoftplusImpl`.
669
+ /// See the documentation for `SoftplusImpl` class to learn what methods it
670
+ /// provides, and examples of how to use `Softplus` with
671
+ /// `torch::nn::SoftplusOptions`. See the documentation for `ModuleHolder` to
672
+ /// learn about PyTorch's module storage semantics.
673
+ TORCH_MODULE(Softplus);
674
+
675
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
676
+
677
+ /// Applies the soft shrinkage function element-wise.
678
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softshrink to learn
679
+ /// about the exact behavior of this module.
680
+ ///
681
+ /// See the documentation for `torch::nn::SoftshrinkOptions` class to learn what
682
+ /// constructor arguments are supported for this module.
683
+ ///
684
+ /// Example:
685
+ /// ```
686
+ /// Softshrink model(SoftshrinkOptions(42.42));
687
+ /// ```
688
+ class TORCH_API SoftshrinkImpl : public torch::nn::Cloneable<SoftshrinkImpl> {
689
+ public:
690
+ explicit SoftshrinkImpl(const SoftshrinkOptions& options_ = {});
691
+
692
+ Tensor forward(const Tensor& input);
693
+
694
+ void reset() override;
695
+
696
+ /// Pretty prints the `Softshrink` module into the given `stream`.
697
+ void pretty_print(std::ostream& stream) const override;
698
+
699
+ /// The options with which this `Module` was constructed.
700
+ SoftshrinkOptions options;
701
+ };
702
+
703
+ /// A `ModuleHolder` subclass for `SoftshrinkImpl`.
704
+ /// See the documentation for `SoftshrinkImpl` class to learn what methods it
705
+ /// provides, and examples of how to use `Softshrink` with
706
+ /// `torch::nn::SoftshrinkOptions`. See the documentation for `ModuleHolder` to
707
+ /// learn about PyTorch's module storage semantics.
708
+ TORCH_MODULE(Softshrink);
709
+
710
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softsign ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
711
+
712
+ /// Applies Softsign over a given input.
713
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softsign to learn
714
+ /// about the exact behavior of this module.
715
+ class TORCH_API SoftsignImpl : public torch::nn::Cloneable<SoftsignImpl> {
716
+ public:
717
+ Tensor forward(const Tensor& input);
718
+
719
+ void reset() override;
720
+
721
+ /// Pretty prints the `Softsign` module into the given `stream`.
722
+ void pretty_print(std::ostream& stream) const override;
723
+ };
724
+
725
+ /// A `ModuleHolder` subclass for `SoftsignImpl`.
726
+ /// See the documentation for `SoftsignImpl` class to learn what methods it
727
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
728
+ /// module storage semantics.
729
+ TORCH_MODULE(Softsign);
730
+
731
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tanh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
732
+
733
+ /// Applies Tanh over a given input.
734
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Tanh to learn
735
+ /// about the exact behavior of this module.
736
+ class TORCH_API TanhImpl : public torch::nn::Cloneable<TanhImpl> {
737
+ public:
738
+ Tensor forward(const Tensor& input);
739
+
740
+ void reset() override;
741
+
742
+ /// Pretty prints the `Tanh` module into the given `stream`.
743
+ void pretty_print(std::ostream& stream) const override;
744
+ };
745
+
746
+ /// A `ModuleHolder` subclass for `TanhImpl`.
747
+ /// See the documentation for `TanhImpl` class to learn what methods it
748
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
749
+ /// module storage semantics.
750
+ TORCH_MODULE(Tanh);
751
+
752
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tanhshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
753
+
754
+ /// Applies Tanhshrink over a given input.
755
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Tanhshrink to learn
756
+ /// about the exact behavior of this module.
757
+ class TORCH_API TanhshrinkImpl : public torch::nn::Cloneable<TanhshrinkImpl> {
758
+ public:
759
+ Tensor forward(const Tensor& input);
760
+
761
+ void reset() override;
762
+
763
+ /// Pretty prints the `Tanhshrink` module into the given `stream`.
764
+ void pretty_print(std::ostream& stream) const override;
765
+ };
766
+
767
+ /// A `ModuleHolder` subclass for `TanhshrinkImpl`.
768
+ /// See the documentation for `TanhshrinkImpl` class to learn what methods it
769
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
770
+ /// module storage semantics.
771
+ TORCH_MODULE(Tanhshrink);
772
+
773
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Threshold ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
774
+
775
+ /// Applies the Threshold function element-wise.
776
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Threshold to learn
777
+ /// about the exact behavior of this module.
778
+ ///
779
+ /// See the documentation for `torch::nn::ThresholdOptions` class to learn what
780
+ /// constructor arguments are supported for this module.
781
+ ///
782
+ /// Example:
783
+ /// ```
784
+ /// Threshold model(ThresholdOptions(42.42, 24.24).inplace(true));
785
+ /// ```
786
+ class TORCH_API ThresholdImpl : public torch::nn::Cloneable<ThresholdImpl> {
787
+ public:
788
+ ThresholdImpl(double threshold, double value)
789
+ : ThresholdImpl(ThresholdOptions(threshold, value)) {}
790
+ explicit ThresholdImpl(const ThresholdOptions& options_);
791
+
792
+ Tensor forward(Tensor input);
793
+
794
+ void reset() override;
795
+
796
+ /// Pretty prints the `Threshold` module into the given `stream`.
797
+ void pretty_print(std::ostream& stream) const override;
798
+
799
+ /// The options with which this `Module` was constructed.
800
+ ThresholdOptions options;
801
+ };
802
+
803
+ /// A `ModuleHolder` subclass for `ThresholdImpl`.
804
+ /// See the documentation for `ThresholdImpl` class to learn what methods it
805
+ /// provides, and examples of how to use `Threshold` with
806
+ /// `torch::nn::ThresholdOptions`. See the documentation for `ModuleHolder` to
807
+ /// learn about PyTorch's module storage semantics.
808
+ TORCH_MODULE(Threshold);
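A tiny sketch of the thresholding behaviour (values not exceeding the threshold are replaced by `value`; assumes libtorch):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  // Elements <= 2.0 are replaced by 0.0; elements > 2.0 pass through unchanged.
  torch::nn::Threshold thresh(
      torch::nn::ThresholdOptions(/*threshold=*/2.0, /*value=*/0.0));
  torch::Tensor x = torch::tensor({1.0, 2.5, 3.0});
  std::cout << thresh(x) << '\n';  // 0.0, 2.5, 3.0
}
```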
809
+
810
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiheadAttention ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
811
+
812
+ /// Applies multi-head attention over the query, key, and value inputs.
813
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.MultiheadAttention
814
+ /// to learn about the exact behavior of this module.
815
+ ///
816
+ /// See the documentation for `torch::nn::MultiheadAttentionOptions` class to
817
+ /// learn what constructor arguments are supported for this module.
818
+ ///
819
+ /// Example:
820
+ /// ```
821
+ /// MultiheadAttention model(MultiheadAttentionOptions(20, 10).bias(false));
822
+ /// ```
823
+ class TORCH_API MultiheadAttentionImpl
824
+ : public torch::nn::Cloneable<MultiheadAttentionImpl> {
825
+ public:
826
+ MultiheadAttentionImpl(int64_t embed_dim, int64_t num_heads)
827
+ : MultiheadAttentionImpl(
828
+ MultiheadAttentionOptions(embed_dim, num_heads)) {}
829
+ explicit MultiheadAttentionImpl(const MultiheadAttentionOptions& options_);
830
+
831
+ std::tuple<Tensor, Tensor> forward(
832
+ const Tensor& query,
833
+ const Tensor& key,
834
+ const Tensor& value,
835
+ const Tensor& key_padding_mask = {},
836
+ bool need_weights = true,
837
+ const Tensor& attn_mask = {},
838
+ bool average_attn_weights = true);
839
+
840
+ protected:
841
+ FORWARD_HAS_DEFAULT_ARGS(
842
+ {3, AnyValue(Tensor())},
843
+ {4, AnyValue(true)},
844
+ {5, AnyValue(Tensor())},
845
+ {6, AnyValue(true)})
846
+
847
+ public:
848
+ void reset() override;
849
+
850
+ void _reset_parameters();
851
+
852
+ /// The options with which this `Module` was constructed.
853
+ MultiheadAttentionOptions options;
854
+
855
+ bool _qkv_same_embed_dim;
856
+ Tensor in_proj_weight;
857
+ Tensor in_proj_bias;
858
+ Tensor bias_k;
859
+ Tensor bias_v;
860
+ Linear out_proj = nullptr;
861
+ Tensor q_proj_weight;
862
+ Tensor k_proj_weight;
863
+ Tensor v_proj_weight;
864
+ int64_t head_dim;
865
+ };
866
+
867
+ /// A `ModuleHolder` subclass for `MultiheadAttentionImpl`.
868
+ /// See the documentation for `MultiheadAttentionImpl` class to learn what
869
+ /// methods it provides, and examples of how to use `MultiheadAttention` with
870
+ /// `torch::nn::MultiheadAttentionOptions`. See the documentation for
871
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
872
+ TORCH_MODULE(MultiheadAttention);
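A forward-pass sketch, assuming the default (sequence, batch, embed_dim) input layout and C++17 structured bindings (shapes are illustrative; assumes libtorch):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  const int64_t embed_dim = 20, num_heads = 4, seq_len = 5, batch = 2;
  torch::nn::MultiheadAttention mha(
      torch::nn::MultiheadAttentionOptions(embed_dim, num_heads));

  torch::Tensor query = torch::randn({seq_len, batch, embed_dim});
  torch::Tensor key   = torch::randn({seq_len, batch, embed_dim});
  torch::Tensor value = torch::randn({seq_len, batch, embed_dim});

  // forward() returns {attention output, (averaged) attention weights}.
  auto [attn_output, attn_weights] = mha->forward(query, key, value);
  std::cout << attn_output.sizes() << ' ' << attn_weights.sizes() << '\n';
}
```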
873
+
874
+ } // namespace nn
875
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/adaptive.h ADDED
@@ -0,0 +1,109 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/activation.h>
5
+ #include <torch/nn/module.h>
6
+ #include <torch/nn/modules/container/modulelist.h>
7
+ #include <torch/nn/modules/container/sequential.h>
8
+ #include <torch/nn/modules/linear.h>
9
+ #include <torch/nn/options/adaptive.h>
10
+
11
+ namespace torch {
12
+ namespace nn {
13
+
14
+ /// The output of a single invocation of an AdaptiveLogSoftmaxWithLoss
15
+ /// module's `forward()` method.
16
+ struct TORCH_API ASMoutput {
17
+ ASMoutput(Tensor output_, double loss_);
18
+
19
+ /// Tensor containing computed target log probabilities for each example
20
+ Tensor output;
21
+
22
+ /// Scalar representing the computed negative log likelihood loss
23
+ double loss;
24
+ };
25
+
26
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveLogSoftmaxWithLoss
27
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
28
+
29
+ /// Efficient softmax approximation as described in
30
+ /// `Efficient softmax approximation for GPUs`_ by Edouard Grave, Armand Joulin,
31
+ /// Moustapha Cissé, David Grangier, and Hervé Jégou.
32
+ /// See
33
+ /// https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveLogSoftmaxWithLoss
34
+ /// to learn about the exact behavior of this module.
35
+ ///
36
+ /// See the documentation for `torch::nn::AdaptiveLogSoftmaxWithLossOptions`
37
+ /// class to learn what constructor arguments are supported for this module.
38
+ ///
39
+ /// Example:
40
+ /// ```
41
+ /// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10,
42
+ /// {4, 8}).div_value(2.).head_bias(true));
43
+ /// ```
44
+ class TORCH_API AdaptiveLogSoftmaxWithLossImpl
45
+ : public Cloneable<AdaptiveLogSoftmaxWithLossImpl> {
46
+ public:
47
+ AdaptiveLogSoftmaxWithLossImpl(
48
+ int64_t in_features,
49
+ int64_t n_classes,
50
+ std::vector<int64_t> cutoffs)
51
+ : AdaptiveLogSoftmaxWithLossImpl(AdaptiveLogSoftmaxWithLossOptions(
52
+ in_features,
53
+ n_classes,
54
+ cutoffs)) {}
55
+
56
+ explicit AdaptiveLogSoftmaxWithLossImpl(
57
+ AdaptiveLogSoftmaxWithLossOptions options_);
58
+
59
+ ASMoutput forward(const Tensor& input, const Tensor& target);
60
+
61
+ void reset() override;
62
+
63
+ void reset_parameters();
64
+
65
+ /// Pretty prints the `AdaptiveLogSoftmaxWithLoss` module into the given
66
+ /// `stream`.
67
+ void pretty_print(std::ostream& stream) const override;
68
+
69
+ /// Given the input tensor and the output of `head`, computes the log of the full
70
+ /// distribution
71
+ Tensor _get_full_log_prob(const Tensor& input, const Tensor& head_output);
72
+
73
+ /// Computes log probabilities for all n_classes
74
+ Tensor log_prob(const Tensor& input);
75
+
76
+ /// This is equivalent to `log_prob(input).argmax(1)` but is more efficient in
77
+ /// some cases
78
+ Tensor predict(const Tensor& input);
79
+
80
+ /// The options with which this `Module` was constructed
81
+ AdaptiveLogSoftmaxWithLossOptions options;
82
+
83
+ /// Cutoffs used to assign targets to their buckets. It should be an ordered
84
+ /// sequence of integers sorted in increasing order.
85
+ std::vector<int64_t> cutoffs;
86
+
87
+ int64_t shortlist_size;
88
+
89
+ /// Number of clusters
90
+ int64_t n_clusters;
91
+
92
+ /// Output size of head classifier
93
+ int64_t head_size;
94
+
95
+ Linear head = nullptr;
96
+
97
+ ModuleList tail;
98
+ };
99
+
100
+ /// A `ModuleHolder` subclass for `AdaptiveLogSoftmaxWithLossImpl`.
101
+ /// See the documentation for `AdaptiveLogSoftmaxWithLossImpl` class to learn
102
+ /// what methods it provides, and examples of how to use
103
+ /// `AdaptiveLogSoftmaxWithLoss` with
104
+ /// `torch::nn::AdaptiveLogSoftmaxWithLossOptions`. See the documentation for
105
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
106
+ TORCH_MODULE(AdaptiveLogSoftmaxWithLoss);
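A small sketch matching the options in the doc comment above (8 input features, 10 classes, cutoffs {4, 8}; assumes libtorch, shapes are illustrative):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::AdaptiveLogSoftmaxWithLoss asm_head(
      torch::nn::AdaptiveLogSoftmaxWithLossOptions(8, 10, {4, 8}));

  torch::Tensor input  = torch::randn({4, 8});
  torch::Tensor target = torch::randint(/*low=*/0, /*high=*/10, {4}, torch::kLong);

  // forward() returns an ASMoutput: per-example target log-probs and the NLL loss.
  auto out = asm_head->forward(input, target);
  std::cout << "loss: " << out.loss << '\n';

  // Full [batch, n_classes] log-probabilities and argmax predictions.
  torch::Tensor logp = asm_head->log_prob(input);
  torch::Tensor pred = asm_head->predict(input);
  std::cout << pred << '\n';
}
```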
107
+
108
+ } // namespace nn
109
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/batchnorm.h ADDED
@@ -0,0 +1,250 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/batchnorm.h>
5
+ #include <torch/nn/init.h>
6
+ #include <torch/nn/options/batchnorm.h>
7
+ #include <torch/nn/pimpl.h>
8
+ #include <torch/types.h>
9
+
10
+ #include <cstdint>
11
+
12
+ namespace torch {
13
+ namespace nn {
14
+
15
+ /// Base class for all (dimension-specialized) batchnorm and instancenorm
16
+ /// modules.
17
+ template <size_t D, typename Derived, typename DerivedOptions>
18
+ class NormImplBase : public torch::nn::Cloneable<Derived> {
19
+ protected:
20
+ virtual void _check_input_dim(const Tensor& input) = 0;
21
+
22
+ public:
23
+ NormImplBase(const DerivedOptions& options_) : options(options_) {
24
+ // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
25
+ reset();
26
+ }
27
+
28
+ void reset() override {
29
+ if (options.affine()) {
30
+ weight = this->register_parameter(
31
+ "weight", torch::empty({options.num_features()}));
32
+ bias = this->register_parameter(
33
+ "bias", torch::empty({options.num_features()}));
34
+ } else {
35
+ weight =
36
+ this->register_parameter("weight", Tensor(), /*requires_grad=*/false);
37
+ bias =
38
+ this->register_parameter("bias", Tensor(), /*requires_grad=*/false);
39
+ }
40
+ if (options.track_running_stats()) {
41
+ running_mean = this->register_buffer(
42
+ "running_mean", torch::zeros({options.num_features()}));
43
+ running_var = this->register_buffer(
44
+ "running_var", torch::ones({options.num_features()}));
45
+ num_batches_tracked = this->register_buffer(
46
+ "num_batches_tracked", torch::tensor(0, torch::dtype(torch::kLong)));
47
+ } else {
48
+ running_mean = this->register_buffer("running_mean", Tensor());
49
+ running_var = this->register_buffer("running_var", Tensor());
50
+ num_batches_tracked =
51
+ this->register_buffer("num_batches_tracked", Tensor());
52
+ }
53
+ reset_parameters();
54
+ }
55
+
56
+ void reset_running_stats() {
57
+ if (options.track_running_stats()) {
58
+ running_mean.zero_();
59
+ running_var.fill_(1);
60
+ num_batches_tracked.zero_();
61
+ }
62
+ }
63
+
64
+ void reset_parameters() {
65
+ reset_running_stats();
66
+ if (options.affine()) {
67
+ torch::nn::init::ones_(weight);
68
+ torch::nn::init::zeros_(bias);
69
+ }
70
+ }
71
+
72
+ /// The options with which this module was constructed.
73
+ DerivedOptions options;
74
+
75
+ /// The learned weight.
76
+ /// Only defined if the `affine` option was `true` upon construction.
77
+ Tensor weight;
78
+
79
+ /// The learned bias.
80
+ /// Only defined if the `affine` option was `true` upon construction.
81
+ Tensor bias;
82
+
83
+ /// The running mean.
84
+ /// Only defined if the `track_running_stats` option was `true` upon
85
+ /// construction.
86
+ Tensor running_mean;
87
+
88
+ /// The running variance.
89
+ /// Only defined if the `track_running_stats` option was `true` upon
90
+ /// construction.
91
+ Tensor running_var;
92
+
93
+ /// The number of batches tracked so far (incremented on each training forward).
94
+ /// Only defined if the `track_running_stats` option was `true` upon
95
+ /// construction.
96
+ Tensor num_batches_tracked;
97
+ };
98
+
99
+ /// Base class for all (dimension-specialized) batchnorm modules.
100
+ template <size_t D, typename Derived>
101
+ class BatchNormImplBase : public NormImplBase<D, Derived, BatchNormOptions> {
102
+ public:
103
+ using NormImplBase<D, Derived, BatchNormOptions>::NormImplBase;
104
+
105
+ Tensor forward(const Tensor& input) {
106
+ this->_check_input_dim(input);
107
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
108
+ double exponential_average_factor;
109
+ if (this->options.momentum() == c10::nullopt) {
110
+ exponential_average_factor = 0.0;
111
+ } else {
112
+ exponential_average_factor = this->options.momentum().value();
113
+ }
114
+
115
+ if (this->is_training() && this->options.track_running_stats()) {
116
+ if (this->num_batches_tracked.defined()) {
117
+ this->num_batches_tracked += 1;
118
+ if (this->options.momentum() ==
119
+ c10::nullopt) { // use cumulative moving average
120
+ exponential_average_factor =
121
+ 1.0 / this->num_batches_tracked.template item<double>();
122
+ } else { // use exponential moving average
123
+ exponential_average_factor = this->options.momentum().value();
124
+ }
125
+ }
126
+ }
127
+
128
+ return torch::nn::functional::detail::batch_norm(
129
+ input,
130
+ this->running_mean,
131
+ this->running_var,
132
+ this->weight,
133
+ this->bias,
134
+ this->is_training() || !this->options.track_running_stats(),
135
+ /*momentum=*/exponential_average_factor,
136
+ this->options.eps());
137
+ }
138
+
139
+ /// Pretty prints the `BatchNorm{1,2,3}d` module into the given `stream`.
140
+ void pretty_print(std::ostream& stream) const override {
141
+ stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d("
142
+ << this->options.num_features() << ", "
143
+ << "eps=" << this->options.eps() << ", "
144
+ << "momentum=";
145
+
146
+ if (this->options.momentum().has_value()) {
147
+ stream << this->options.momentum().value();
148
+ } else {
149
+ stream << "None";
150
+ }
151
+
152
+ stream << ", "
153
+ << "affine=" << this->options.affine() << ", "
154
+ << "track_running_stats=" << this->options.track_running_stats()
155
+ << ")";
156
+ }
157
+ };
158
+
159
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm1d
160
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
161
+
162
+ /// Applies the BatchNorm1d function.
163
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm1d to learn
164
+ /// about the exact behavior of this module.
165
+ ///
166
+ /// See the documentation for `torch::nn::BatchNorm1dOptions` class to learn
167
+ /// what constructor arguments are supported for this module.
168
+ ///
169
+ /// Example:
170
+ /// ```
171
+ /// BatchNorm1d
172
+ /// model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
173
+ /// ```
174
+ class TORCH_API BatchNorm1dImpl : public BatchNormImplBase<1, BatchNorm1dImpl> {
175
+ protected:
176
+ void _check_input_dim(const Tensor& input) override;
177
+
178
+ public:
179
+ using BatchNormImplBase<1, BatchNorm1dImpl>::BatchNormImplBase;
180
+ };
181
+
182
+ /// A `ModuleHolder` subclass for `BatchNorm1dImpl`.
183
+ /// See the documentation for `BatchNorm1dImpl` class to learn what methods it
184
+ /// provides, and examples of how to use `BatchNorm1d` with
185
+ /// `torch::nn::BatchNorm1dOptions`. See the documentation for `ModuleHolder` to
186
+ /// learn about PyTorch's module storage semantics.
187
+ TORCH_MODULE(BatchNorm1d);
188
+
189
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm2d
190
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
191
+
192
+ /// Applies the BatchNorm2d function.
193
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm2d to learn
194
+ /// about the exact behavior of this module.
195
+ ///
196
+ /// See the documentation for `torch::nn::BatchNorm2dOptions` class to learn
197
+ /// what constructor arguments are supported for this module.
198
+ ///
199
+ /// Example:
200
+ /// ```
201
+ /// BatchNorm2d
202
+ /// model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
203
+ /// ```
204
+ class TORCH_API BatchNorm2dImpl : public BatchNormImplBase<2, BatchNorm2dImpl> {
205
+ protected:
206
+ void _check_input_dim(const Tensor& input) override;
207
+
208
+ public:
209
+ using BatchNormImplBase<2, BatchNorm2dImpl>::BatchNormImplBase;
210
+ };
211
+
212
+ /// A `ModuleHolder` subclass for `BatchNorm2dImpl`.
213
+ /// See the documentation for `BatchNorm2dImpl` class to learn what methods it
214
+ /// provides, and examples of how to use `BatchNorm2d` with
215
+ /// `torch::nn::BatchNorm2dOptions`. See the documentation for `ModuleHolder` to
216
+ /// learn about PyTorch's module storage semantics.
217
+ TORCH_MODULE(BatchNorm2d);
218
+
219
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm3d
220
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
221
+
222
+ /// Applies the BatchNorm3d function.
223
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm3d to learn
224
+ /// about the exact behavior of this module.
225
+ ///
226
+ /// See the documentation for `torch::nn::BatchNorm3dOptions` class to learn
227
+ /// what constructor arguments are supported for this module.
228
+ ///
229
+ /// Example:
230
+ /// ```
231
+ /// BatchNorm3d
232
+ /// model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
233
+ /// ```
234
+ class TORCH_API BatchNorm3dImpl : public BatchNormImplBase<3, BatchNorm3dImpl> {
235
+ protected:
236
+ void _check_input_dim(const Tensor& input) override;
237
+
238
+ public:
239
+ using BatchNormImplBase<3, BatchNorm3dImpl>::BatchNormImplBase;
240
+ };
241
+
242
+ /// A `ModuleHolder` subclass for `BatchNorm3dImpl`.
243
+ /// See the documentation for `BatchNorm3dImpl` class to learn what methods it
244
+ /// provides, and examples of how to use `BatchNorm3d` with
245
+ /// `torch::nn::BatchNorm3dOptions`. See the documentation for `ModuleHolder` to
246
+ /// learn about PyTorch's module storage semantics.
247
+ TORCH_MODULE(BatchNorm3d);
248
+
249
+ } // namespace nn
250
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/common.h ADDED
@@ -0,0 +1,97 @@
1
+ #pragma once
2
+
3
+ /// This macro enables a module with default arguments in its forward method
4
+ /// to be used in a Sequential module.
5
+ ///
6
+ /// Example usage:
7
+ ///
8
+ /// Let's say we have a module declared like this:
9
+ /// ```
10
+ /// struct MImpl : torch::nn::Module {
11
+ /// public:
12
+ /// explicit MImpl(int value_) : value(value_) {}
13
+ /// torch::Tensor forward(int a, int b = 2, double c = 3.0) {
14
+ /// return torch::tensor(a + b + c);
15
+ /// }
16
+ /// private:
17
+ /// int value;
18
+ /// };
19
+ /// TORCH_MODULE(M);
20
+ /// ```
21
+ ///
22
+ /// If we try to use it in a Sequential module and run forward:
23
+ /// ```
24
+ /// torch::nn::Sequential seq(M(1));
25
+ /// seq->forward(1);
26
+ /// ```
27
+ ///
28
+ /// We will receive the following error message:
29
+ /// ```
30
+ /// MImpl's forward() method expects 3 argument(s), but received 1.
31
+ /// If MImpl's forward() method has default arguments, please make sure
32
+ /// the forward() method is declared with a corresponding
33
+ /// `FORWARD_HAS_DEFAULT_ARGS` macro.
34
+ /// ```
35
+ ///
36
+ /// The right way to fix this error is to use the `FORWARD_HAS_DEFAULT_ARGS`
37
+ /// macro when declaring the module:
38
+ /// ```
39
+ /// struct MImpl : torch::nn::Module {
40
+ /// public:
41
+ /// explicit MImpl(int value_) : value(value_) {}
42
+ /// torch::Tensor forward(int a, int b = 2, double c = 3.0) {
43
+ /// return torch::tensor(a + b + c);
44
+ /// }
45
+ /// protected:
46
+ /// /*
47
+ /// NOTE: looking at the argument list of `forward`:
48
+ /// `forward(int a, int b = 2, double c = 3.0)`
49
+ /// we saw the following default arguments:
50
+ /// ----------------------------------------------------------------
51
+ /// 0-based index of default | Default value of arg
52
+ /// arg in forward arg list | (wrapped by `torch::nn::AnyValue()`)
53
+ /// ----------------------------------------------------------------
54
+ /// 1 | torch::nn::AnyValue(2)
55
+ /// 2 | torch::nn::AnyValue(3.0)
56
+ /// ----------------------------------------------------------------
57
+ /// Thus we pass the following arguments to the `FORWARD_HAS_DEFAULT_ARGS`
58
+ /// macro:
59
+ /// */
60
+ /// FORWARD_HAS_DEFAULT_ARGS({1, torch::nn::AnyValue(2)}, {2,
61
+ /// torch::nn::AnyValue(3.0)})
62
+ /// private:
63
+ /// int value;
64
+ /// };
65
+ /// TORCH_MODULE(M);
66
+ /// ```
67
+ /// Now, running the following would work:
68
+ /// ```
69
+ /// torch::nn::Sequential seq(M(1));
70
+ /// seq->forward(1); // This correctly populates the default arguments for
71
+ /// `MImpl::forward`
72
+ /// ```
73
+ #define FORWARD_HAS_DEFAULT_ARGS(...) \
74
+ template <typename ModuleType, typename... ArgumentTypes> \
75
+ friend struct torch::nn::AnyModuleHolder; \
76
+ bool _forward_has_default_args() override { \
77
+ return true; \
78
+ } \
79
+ unsigned int _forward_num_required_args() override { \
80
+ std::pair<unsigned int, torch::nn::AnyValue> args_info[] = {__VA_ARGS__}; \
81
+ return args_info[0].first; \
82
+ } \
83
+ std::vector<torch::nn::AnyValue> _forward_populate_default_args( \
84
+ std::vector<torch::nn::AnyValue>&& arguments) override { \
85
+ std::pair<unsigned int, torch::nn::AnyValue> args_info[] = {__VA_ARGS__}; \
86
+ unsigned int num_all_args = std::rbegin(args_info)->first + 1; \
87
+ TORCH_INTERNAL_ASSERT( \
88
+ arguments.size() >= _forward_num_required_args() && \
89
+ arguments.size() <= num_all_args); \
90
+ std::vector<torch::nn::AnyValue> ret = std::move(arguments); \
91
+ ret.reserve(num_all_args); \
92
+ for (auto& arg_info : args_info) { \
93
+ if (arg_info.first > ret.size() - 1) \
94
+ ret.emplace_back(std::move(arg_info.second)); \
95
+ } \
96
+ return ret; \
97
+ }
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any.h ADDED
@@ -0,0 +1,372 @@
1
+ #pragma once
2
+
3
+ #include <torch/detail/static.h>
4
+ #include <torch/nn/module.h>
5
+ #include <torch/nn/modules/container/any_module_holder.h>
6
+ #include <torch/nn/modules/container/any_value.h>
7
+ #include <torch/nn/pimpl.h>
8
+ #include <torch/types.h>
9
+
10
+ #include <torch/csrc/autograd/variable.h>
11
+ #include <torch/csrc/utils/variadic.h>
12
+
13
+ #include <ATen/Device.h>
14
+
15
+ #include <memory>
16
+ #include <type_traits>
17
+ #include <typeinfo>
18
+ #include <utility>
19
+ #include <vector>
20
+
21
+ namespace torch {
22
+ namespace nn {
23
+
24
+ /// Stores a type erased `Module`.
25
+ ///
26
+ /// The PyTorch C++ API does not impose an interface on the signature of
27
+ /// `forward()` in `Module` subclasses. This gives you complete freedom to
28
+ /// design your `forward()` methods to your liking. However, this also means
29
+ /// there is no unified base type you could store in order to call `forward()`
30
+ /// polymorphically for any module. This is where the `AnyModule` comes in.
31
+ /// Instead of inheritance, it relies on type erasure for polymorphism.
32
+ ///
33
+ /// An `AnyModule` can store any `nn::Module` subclass that provides a
34
+ /// `forward()` method. This `forward()` may accept any types and return any
35
+ /// type. Once stored in an `AnyModule`, you can invoke the underlying module's
36
+ /// `forward()` by calling `AnyModule::forward()` with the arguments you would
37
+ /// supply to the stored module (though see one important limitation below).
38
+ /// Example:
39
+ ///
40
+ /// \rst
41
+ /// .. code-block:: cpp
42
+ ///
43
+ /// struct GenericTrainer {
44
+ /// torch::nn::AnyModule module;
45
+ ///
46
+ /// void train(torch::Tensor input) {
47
+ /// module.forward(input);
48
+ /// }
49
+ /// };
50
+ ///
51
+ /// GenericTrainer trainer1{torch::nn::Linear(3, 4)};
52
+ /// GenericTrainer trainer2{torch::nn::Conv2d(3, 4, 2)};
53
+ /// \endrst
54
+ ///
55
+ /// As `AnyModule` erases the static type of the stored module (and its
56
+ /// `forward()` method) to achieve polymorphism, type checking of arguments is
57
+ /// moved to runtime. That is, passing an argument with an incorrect type to an
58
+ /// `AnyModule` will compile, but throw an exception at runtime:
59
+ ///
60
+ /// \rst
61
+ /// .. code-block:: cpp
62
+ ///
63
+ /// torch::nn::AnyModule module(torch::nn::Linear(3, 4));
64
+ /// // Linear takes a tensor as input, but we are passing an integer.
65
+ /// // This will compile, but throw a `torch::Error` exception at runtime.
66
+ /// module.forward(123);
67
+ /// \endrst
68
+ ///
69
+ /// \rst
70
+ /// .. attention::
71
+ /// One noteworthy limitation of `AnyModule` is that its `forward()` method
72
+ /// does not support implicit conversion of argument types. For example, if
73
+ /// the stored module's `forward()` method accepts a `float` and you call
74
+ /// `any_module.forward(3.4)` (where `3.4` is a `double`), this will throw
75
+ /// an exception.
76
+ /// \endrst
77
+ ///
78
+ /// The return type of the `AnyModule`'s `forward()` method is controlled via
79
+ /// the first template argument to `AnyModule::forward()`. It defaults to
80
+ /// `torch::Tensor`. To change it, you can write `any_module.forward<int>()`,
81
+ /// for example.
82
+ ///
83
+ /// \rst
84
+ /// .. code-block:: cpp
85
+ ///
86
+ /// torch::nn::AnyModule module(torch::nn::Linear(3, 4));
87
+ /// auto output = module.forward(torch::ones({2, 3}));
88
+ ///
89
+ /// struct IntModule {
90
+ /// int forward(int x) { return x; }
91
+ /// };
92
+ /// torch::nn::AnyModule module(IntModule{});
93
+ /// int output = module.forward<int>(5);
94
+ /// \endrst
95
+ ///
96
+ /// The only other method an `AnyModule` provides access to on the stored
97
+ /// module is `clone()`. However, you may acquire a handle on the module via
98
+ /// `.ptr()`, which returns a `shared_ptr<nn::Module>`. Further, if you know
99
+ /// the concrete type of the stored module, you can get a concrete handle to it
100
+ /// using `.get<T>()` where `T` is the concrete module type.
101
+ ///
102
+ /// \rst
103
+ /// .. code-block:: cpp
104
+ ///
105
+ /// torch::nn::AnyModule module(torch::nn::Linear(3, 4));
106
+ /// std::shared_ptr<nn::Module> ptr = module.ptr();
107
+ /// torch::nn::Linear linear(module.get<torch::nn::Linear>());
108
+ /// \endrst
109
+ class AnyModule {
110
+ public:
111
+ /// A default-constructed `AnyModule` is in an empty state.
112
+ AnyModule() = default;
113
+
114
+ /// Constructs an `AnyModule` from a `shared_ptr` to concrete module object.
115
+ template <typename ModuleType>
116
+ explicit AnyModule(std::shared_ptr<ModuleType> module);
117
+
118
+ /// Constructs an `AnyModule` from a concrete module object.
119
+ template <
120
+ typename ModuleType,
121
+ typename = torch::detail::enable_if_module_t<ModuleType>>
122
+ explicit AnyModule(ModuleType&& module);
123
+
124
+ /// Constructs an `AnyModule` from a module holder.
125
+ template <typename ModuleType>
126
+ explicit AnyModule(const ModuleHolder<ModuleType>& module_holder);
127
+
128
+ /// Move construction and assignment is allowed, and follows the default
129
+ /// behavior of move for `std::unique_ptr`.
130
+ AnyModule(AnyModule&&) = default;
131
+ AnyModule& operator=(AnyModule&&) = default;
132
+
133
+ /// Creates a shallow copy of an `AnyModule`.
134
+ AnyModule(const AnyModule& other);
135
+ AnyModule& operator=(const AnyModule& other);
136
+
137
+ /// Creates a deep copy of an `AnyModule` if it contains a module, else an
138
+ /// empty `AnyModule` if it is empty.
139
+ AnyModule clone(optional<Device> device = nullopt) const;
140
+
141
+ /// Assigns a module to the `AnyModule` (to circumvent the explicit
142
+ /// constructor).
143
+ template <typename ModuleType>
144
+ AnyModule& operator=(std::shared_ptr<ModuleType> module);
145
+
146
+ /// Invokes `forward()` on the contained module with the given arguments, and
147
+ /// returns the return value as an `AnyValue`. Use this method when chaining
148
+ /// `AnyModule`s in a loop.
149
+ template <typename... ArgumentTypes>
150
+ AnyValue any_forward(ArgumentTypes&&... arguments);
151
+
152
+ /// Invokes `forward()` on the contained module with the given arguments, and
153
+ /// casts the returned `AnyValue` to the supplied `ReturnType` (which defaults
154
+ /// to `torch::Tensor`).
155
+ template <typename ReturnType = torch::Tensor, typename... ArgumentTypes>
156
+ ReturnType forward(ArgumentTypes&&... arguments);
157
+
158
+ /// Attempts to cast the underlying module to the given module type. Throws an
159
+ /// exception if the types do not match.
160
+ template <typename T, typename = torch::detail::enable_if_module_t<T>>
161
+ T& get();
162
+
163
+ /// Attempts to cast the underlying module to the given module type. Throws an
164
+ /// exception if the types do not match.
165
+ template <typename T, typename = torch::detail::enable_if_module_t<T>>
166
+ const T& get() const;
167
+
168
+ /// Returns the contained module in a `nn::ModuleHolder` subclass if possible
169
+ /// (i.e. if `T` has a constructor for the underlying module type).
170
+ template <typename T, typename ContainedType = typename T::ContainedType>
171
+ T get() const;
172
+
173
+ /// Returns a `std::shared_ptr` whose dynamic type is that of the underlying
174
+ /// module.
175
+ std::shared_ptr<Module> ptr() const;
176
+
177
+ /// Like `ptr()`, but casts the pointer to the given type.
178
+ template <typename T, typename = torch::detail::enable_if_module_t<T>>
179
+ std::shared_ptr<T> ptr() const;
180
+
181
+ /// Returns the `type_info` object of the contained value.
182
+ const std::type_info& type_info() const;
183
+
184
+ /// Returns true if the `AnyModule` does not contain a module.
185
+ bool is_empty() const noexcept;
186
+
187
+ private:
188
+ /// Creates a `unique_ptr<AnyModulePlaceholder>` pointing to an
189
+ /// `AnyModuleHolder` of the correct type. This method is used to deduce the
190
+ /// arguments of the module's `forward()` method.
191
+ template <
192
+ typename ModuleType,
193
+ typename Class,
194
+ typename ReturnType,
195
+ typename... ArgumentTypes>
196
+ std::unique_ptr<AnyModulePlaceholder> make_holder(
197
+ std::shared_ptr<ModuleType>&& module,
198
+ ReturnType (Class::*)(ArgumentTypes...));
199
+
200
+ /// Helper method invoked by const and non-const `get()`.
201
+ template <typename ModuleType, typename ReturnType, typename... ArgumentTypes>
202
+ ModuleType& get_(ReturnType (ModuleType::*)(ArgumentTypes...)) const;
203
+
204
+ /// Helper method invoked by const and non-const `get()`.
205
+ template <typename ModuleType>
206
+ ModuleType& get_() const;
207
+
208
+ /// The type erased module.
209
+ std::unique_ptr<AnyModulePlaceholder> content_;
210
+ };
211
+
212
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModule ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
213
+
214
+ template <typename ModuleType>
215
+ AnyModule::AnyModule(std::shared_ptr<ModuleType> module)
216
+ : content_(make_holder(
217
+ std::move(module),
218
+ &std::remove_reference<ModuleType>::type::forward)) {
219
+ // `AnyModule` can only store an `nn::Module` subclass object that provides
220
+ // a `forward()` method that has a non-templatized return type.
221
+ // (e.g. `AnyModule` cannot store `nn::Sequential`, because `nn::Sequential`'s
222
+ // `forward()` method has a templatized return type.)
223
+ static_assert(
224
+ torch::detail::is_module<ModuleType>::value,
225
+ "Can only store object derived from nn::Module into AnyModule");
226
+ static_assert(
227
+ torch::detail::has_forward<ModuleType>::value,
228
+ "Can only store module with a forward() method that has a non-templatized"
229
+ " argument type and return type into AnyModule (e.g. we cannot store nn::Sequential"
230
+ "into AnyModule, because its forward() method's argument type and return type are templatized."
231
+ " If you need to use nn::Sequentials inside each other you can subclass "
232
+ "nn::Sequential and write a non-templatized forward function for it. You can checkout "
233
+ "https://github.com/pytorch/vision/blob/2f46070f3cb1ea894d82578f3dc5677f82f34958/torchvision/csrc/models/mnasnet.cpp#L59 "
234
+ "for an example on how to do this.).");
235
+ }
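+
+ // As the static_assert above notes, an `nn::Sequential` itself cannot be
+ // stored in an `AnyModule` because its forward() method is templatized. A
+ // rough sketch of the suggested workaround (illustrative only; the names
+ // below are hypothetical) is to expose a non-templatized forward() from a
+ // subclass of `nn::SequentialImpl`:
+ //
+ //   struct MySequentialImpl : torch::nn::SequentialImpl {
+ //     using SequentialImpl::SequentialImpl;
+ //     torch::Tensor forward(torch::Tensor x) {
+ //       return SequentialImpl::forward(std::move(x));
+ //     }
+ //   };
+ //   TORCH_MODULE(MySequential);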
236
+
237
+ template <typename ModuleType, typename>
238
+ AnyModule::AnyModule(ModuleType&& module)
239
+ : AnyModule(
240
+ std::make_shared<ModuleType>(std::forward<ModuleType>(module))) {}
241
+
242
+ template <typename ModuleType>
243
+ AnyModule::AnyModule(const ModuleHolder<ModuleType>& module_holder)
244
+ : AnyModule(module_holder.ptr()) {}
245
+
246
+ inline AnyModule::AnyModule(const AnyModule& other)
247
+ : content_(other.content_ ? other.content_->copy() : nullptr) {}
248
+
249
+ inline AnyModule& AnyModule::operator=(const AnyModule& other) {
250
+ if (this != &other) {
251
+ content_ = other.content_ ? other.content_->copy() : nullptr;
252
+ }
253
+ return *this;
254
+ }
255
+
256
+ inline AnyModule AnyModule::clone(optional<Device> device) const {
257
+ AnyModule clone;
258
+ clone.content_ = content_ ? content_->clone_module(device) : nullptr;
259
+ return clone;
260
+ }
261
+
262
+ template <typename ModuleType>
263
+ AnyModule& AnyModule::operator=(std::shared_ptr<ModuleType> module) {
264
+ // NOLINTNEXTLINE(cppcoreguidelines-c-copy-assignment-signature)
265
+ return (*this = AnyModule(std::move(module)));
266
+ }
267
+
268
+ template <typename... ArgumentTypes>
269
+ AnyValue AnyModule::any_forward(ArgumentTypes&&... arguments) {
270
+ TORCH_CHECK(!is_empty(), "Cannot call forward() on an empty AnyModule");
271
+ std::vector<AnyValue> values;
272
+ values.reserve(sizeof...(ArgumentTypes));
273
+ torch::apply(
274
+ [&values](AnyValue&& value) { values.push_back(std::move(value)); },
275
+ AnyValue(std::forward<ArgumentTypes>(arguments))...);
276
+ return content_->forward(std::move(values));
277
+ }
278
+
279
+ template <typename ReturnType, typename... ArgumentTypes>
280
+ ReturnType AnyModule::forward(ArgumentTypes&&... arguments) {
281
+ return any_forward(std::forward<ArgumentTypes>(arguments)...)
282
+ .template get<ReturnType>();
283
+ }
284
+
285
+ template <typename T, typename>
286
+ T& AnyModule::get() {
287
+ TORCH_CHECK(!is_empty(), "Cannot call get() on an empty AnyModule");
288
+ return get_<T>();
289
+ }
290
+
291
+ template <typename T, typename>
292
+ const T& AnyModule::get() const {
293
+ TORCH_CHECK(!is_empty(), "Cannot call get() on an empty AnyModule");
294
+ return get_<T>();
295
+ }
296
+
297
+ template <typename T, typename ContainedType>
298
+ T AnyModule::get() const {
299
+ return T(ptr<ContainedType>());
300
+ }
301
+
302
+ inline std::shared_ptr<Module> AnyModule::ptr() const {
303
+ TORCH_CHECK(!is_empty(), "Cannot call ptr() on an empty AnyModule");
304
+ return content_->ptr();
305
+ }
306
+
307
+ template <typename T, typename>
308
+ std::shared_ptr<T> AnyModule::ptr() const {
309
+ TORCH_CHECK(!is_empty(), "Cannot call ptr() on an empty AnyModule");
310
+ // Call get() but discard the value, just to do the type checking.
311
+ get_<T>();
312
+ return std::dynamic_pointer_cast<T>(ptr());
313
+ }
314
+
315
+ inline const std::type_info& AnyModule::type_info() const {
316
+ TORCH_CHECK(!is_empty(), "Cannot call type_info() on an empty AnyModule");
317
+ return content_->type_info;
318
+ }
319
+
320
+ inline bool AnyModule::is_empty() const noexcept {
321
+ return content_ == nullptr;
322
+ }
323
+
324
+ // Private Methods
325
+
326
+ template <
327
+ typename ModuleType,
328
+ typename Class,
329
+ typename ReturnType,
330
+ typename... ArgumentTypes>
331
+ std::unique_ptr<AnyModulePlaceholder> AnyModule::make_holder(
332
+ std::shared_ptr<ModuleType>&& module,
333
+ ReturnType (Class::*)(ArgumentTypes...)) {
334
+ static_assert(
335
+ torch::detail::check_not_lvalue_references<ArgumentTypes...>(),
336
+ "Modules stored inside AnyModule must not take references. "
337
+ "Use pointers instead.");
338
+ static_assert(
339
+ !std::is_void<ReturnType>::value,
340
+ "AnyModule cannot store modules that return void "
341
+ "(you can return a dummy value).");
342
+ return std::make_unique<
343
+ AnyModuleHolder<decay_t<ModuleType>, ArgumentTypes...>>(
344
+ std::move(module));
345
+ }
346
+
347
+ template <typename ModuleType>
348
+ ModuleType& AnyModule::get_() const {
349
+ using M = typename std::remove_reference<ModuleType>::type;
350
+ static_assert(
351
+ torch::detail::has_forward<M>::value,
352
+ "Can only call AnyModule::get<T> with a type T that has a forward method");
353
+ return get_(&M::forward);
354
+ }
355
+
356
+ template <typename ModuleType, typename ReturnType, typename... ArgumentTypes>
357
+ ModuleType& AnyModule::get_(
358
+ ReturnType (ModuleType::*)(ArgumentTypes...)) const {
359
+ if (typeid(ModuleType).hash_code() == type_info().hash_code()) {
360
+ return *static_cast<AnyModuleHolder<ModuleType, ArgumentTypes...>&>(
361
+ *content_)
362
+ .module;
363
+ }
364
+ AT_ERROR(
365
+ "Attempted to cast module of type ",
366
+ c10::demangle(type_info().name()),
367
+ " to type ",
368
+ c10::demangle(typeid(ModuleType).name()));
369
+ }
370
+
371
+ } // namespace nn
372
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_value.h ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <torch/detail/static.h>
4
+ #include <torch/nn/module.h>
5
+ #include <torch/nn/pimpl.h>
6
+ #include <torch/types.h>
7
+
8
+ #include <torch/csrc/autograd/variable.h>
9
+ #include <torch/csrc/utils/variadic.h>
10
+
11
+ #include <memory>
12
+ #include <type_traits>
13
+ #include <typeinfo>
14
+ #include <utility>
15
+
16
+ namespace torch {
17
+ namespace nn {
18
+
19
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyValue ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20
+
21
+ /// An implementation of `std::any` which stores
22
+ /// a type erased object, whose concrete value can be retrieved at runtime by
23
+ /// checking if the `typeid()` of a requested type matches the `typeid()` of
24
+ /// the object stored.
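+ ///
+ /// A minimal usage sketch (illustrative only):
+ ///
+ /// \rst
+ /// .. code-block:: cpp
+ ///
+ /// torch::nn::AnyValue value(5);
+ /// int* as_int = value.try_get<int>(); // non-null, *as_int == 5
+ /// float* as_float = value.try_get<float>(); // nullptr, type does not match
+ /// int i = value.get<int>(); // returns 5
+ /// // value.get<float>() would throw, since the stored type is int
+ /// \endrst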
25
+ class AnyValue {
26
+ public:
27
+ /// Move construction and assignment is allowed, and follows the default
28
+ /// behavior of move for `std::unique_ptr`.
29
+ AnyValue(AnyValue&&) = default;
30
+ AnyValue& operator=(AnyValue&&) = default;
31
+
32
+ /// Copy construction and assignment are allowed.
33
+ AnyValue(const AnyValue& other) : content_(other.content_->clone()) {}
34
+ AnyValue& operator=(const AnyValue& other) {
35
+ content_ = other.content_->clone();
36
+ return *this;
37
+ }
38
+
39
+ /// Constructs the `AnyValue` from a value of an arbitrary type.
40
+ template <typename T>
41
+ // NOLINTNEXTLINE(bugprone-forwarding-reference-overload)
42
+ explicit AnyValue(T&& value)
43
+ : content_(std::make_unique<Holder<decay_t<T>>>(std::forward<T>(value))) {
44
+ }
45
+
46
+ /// Returns a pointer to the value contained in the `AnyValue` if the type
47
+ /// passed as template parameter matches the type of the value stored, and
48
+ /// returns a null pointer otherwise.
49
+ template <typename T>
50
+ T* try_get() {
51
+ static_assert(
52
+ !std::is_reference<T>::value,
53
+ "AnyValue stores decayed types, you cannot cast it to a reference type");
54
+ static_assert(
55
+ !std::is_array<T>::value,
56
+ "AnyValue stores decayed types, you must cast it to T* instead of T[]");
57
+ if (typeid(T).hash_code() == type_info().hash_code()) {
58
+ return &static_cast<Holder<T>&>(*content_).value;
59
+ }
60
+ return nullptr;
61
+ }
62
+
63
+ /// Returns the value contained in the `AnyValue` if the type passed as
64
+ /// template parameter matches the type of the value stored, and throws an
65
+ /// exception otherwise.
66
+ template <typename T>
67
+ T get() {
68
+ if (auto* maybe_value = try_get<T>()) {
69
+ return *maybe_value;
70
+ }
71
+ AT_ERROR(
72
+ "Attempted to cast AnyValue to ",
73
+ c10::demangle(typeid(T).name()),
74
+ ", but its actual type is ",
75
+ c10::demangle(type_info().name()));
76
+ }
77
+
78
+ /// Returns the `type_info` object of the contained value.
79
+ const std::type_info& type_info() const noexcept {
80
+ return content_->type_info;
81
+ }
82
+
83
+ private:
84
+ friend struct AnyModulePlaceholder;
85
+ friend struct TestAnyValue;
86
+
87
+ /// \internal
88
+ /// The static type of the object we store in the `AnyValue`, which erases the
89
+ /// actual object's type, allowing us only to check the `type_info` of the
90
+ /// type stored in the dynamic type.
91
+ struct Placeholder {
92
+ explicit Placeholder(const std::type_info& type_info_) noexcept
93
+ : type_info(type_info_) {}
94
+ Placeholder(const Placeholder&) = default;
95
+ Placeholder(Placeholder&&) = default;
96
+ virtual ~Placeholder() = default;
97
+ virtual std::unique_ptr<Placeholder> clone() const {
98
+ TORCH_CHECK(false, "clone() should only be called on `AnyValue::Holder`");
99
+ }
100
+ const std::type_info& type_info;
101
+ };
102
+
103
+ /// \internal
104
+ /// The dynamic type of the object we store in the `AnyValue`, which hides the
105
+ /// actual object we have erased in this `AnyValue`.
106
+ template <typename T>
107
+ struct Holder : public Placeholder {
108
+ /// A template because T&& would not be universal reference here.
109
+ template <typename U>
110
+ // NOLINTNEXTLINE(bugprone-forwarding-reference-overload)
111
+ explicit Holder(U&& value_) noexcept
112
+ : Placeholder(typeid(T)), value(std::forward<U>(value_)) {}
113
+ std::unique_ptr<Placeholder> clone() const override {
114
+ return std::make_unique<Holder<T>>(value);
115
+ }
116
+ T value;
117
+ };
118
+
119
+ /// The type erased object.
120
+ std::unique_ptr<Placeholder> content_;
121
+ };
122
+
123
+ } // namespace nn
124
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/functional.h ADDED
@@ -0,0 +1,105 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/utils/variadic.h>
5
+ #include <torch/nn/cloneable.h>
6
+ #include <torch/nn/pimpl.h>
7
+ #include <torch/types.h>
8
+
9
+ #include <functional>
10
+ #include <utility>
11
+
12
+ namespace torch {
13
+ namespace nn {
14
+
15
+ /// Wraps a function in a `Module`.
16
+ ///
17
+ /// The `Functional` module allows wrapping an arbitrary function or function
18
+ /// object in an `nn::Module`. This is primarily handy for usage in
19
+ /// `Sequential`.
20
+ ///
21
+ /// \rst
22
+ /// .. code-block:: cpp
23
+ ///
24
+ /// Sequential sequential(
25
+ /// Linear(3, 4),
26
+ /// Functional(torch::relu),
27
+ /// BatchNorm1d(3),
28
+ /// Functional(torch::elu, /*alpha=*/1));
29
+ /// \endrst
30
+ ///
31
+ /// While a `Functional` module only accepts a single `Tensor` as input, it is
32
+ /// possible for the wrapped function to accept further arguments. However,
33
+ /// these have to be bound *at construction time*. For example, if
34
+ /// you want to wrap `torch::leaky_relu`, which accepts a `slope` scalar as its
35
+ /// second argument, with a particular value for its `slope` in a `Functional`
36
+ /// module, you could write
37
+ ///
38
+ /// \rst
39
+ /// .. code-block:: cpp
40
+ ///
41
+ /// Functional(torch::leaky_relu, /*slope=*/0.5)
42
+ /// \endrst
43
+ ///
44
+ /// The value of `0.5` is then stored within the `Functional` object and
45
+ /// supplied to the function call at invocation time. Note that such bound
46
+ /// values are evaluated eagerly and stored a single time. See the documentation
47
+ /// of [std::bind](https://en.cppreference.com/w/cpp/utility/functional/bind)
48
+ /// for more information on the semantics of argument binding.
49
+ ///
50
+ /// \rst
51
+ /// .. attention::
52
+ /// After passing any bound arguments, the function must accept a single
53
+ /// tensor and return a single tensor.
54
+ /// \endrst
55
+ ///
56
+ /// Note that `Functional` overloads the call operator (`operator()`) such that
57
+ /// you can invoke it with `my_func(...)`.
58
+ class TORCH_API FunctionalImpl : public torch::nn::Cloneable<FunctionalImpl> {
59
+ public:
60
+ using Function = std::function<Tensor(Tensor)>;
61
+
62
+ /// Constructs a `Functional` from a function object.
63
+ explicit FunctionalImpl(Function function);
64
+
65
+ template <
66
+ typename SomeFunction,
67
+ typename... Args,
68
+ typename = torch::enable_if_t<(sizeof...(Args) > 0)>>
69
+ explicit FunctionalImpl(SomeFunction original_function, Args&&... args)
70
+ // NOLINTNEXTLINE(modernize-avoid-bind)
71
+ : function_(std::bind(
72
+ original_function,
73
+ /*input=*/std::placeholders::_1,
74
+ std::forward<Args>(args)...)) {
75
+ // std::bind is normally evil, but (1) gcc is broken w.r.t. handling
76
+ // parameter pack expansion in lambdas and (2) moving parameter packs into
77
+ // a lambda only works with C++14, so std::bind is the more move-aware
78
+ // solution here.
79
+ }
80
+
81
+ void reset() override;
82
+
83
+ /// Pretty prints the `Functional` module into the given `stream`.
84
+ void pretty_print(std::ostream& stream) const override;
85
+
86
+ /// Forwards the `input` tensor to the underlying (bound) function object.
87
+ Tensor forward(Tensor input);
88
+
89
+ /// Calls forward(input).
90
+ Tensor operator()(Tensor input);
91
+
92
+ bool is_serializable() const override;
93
+
94
+ private:
95
+ Function function_;
96
+ };
97
+
98
+ /// A `ModuleHolder` subclass for `FunctionalImpl`.
99
+ /// See the documentation for `FunctionalImpl` class to learn what methods it
100
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
101
+ /// module storage semantics.
102
+ TORCH_MODULE(Functional);
103
+
104
+ } // namespace nn
105
+ } // namespace torch