applied-ai-018 committed on
Commit b8cf40c · verified · 1 Parent(s): 6ad7cd1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader.h +57 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/base.h +255 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateful.h +65 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateless.h +82 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader_options.h +65 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets.h +9 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/chunk.h +529 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/data_shuttle.h +87 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/queue.h +84 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/sequencers.h +113 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/example.h +55 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/iterator.h +178 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers.h +9 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/base.h +47 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms.h +7 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/base.h +53 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/collate.h +35 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/lambda.h +56 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/stack.h +49 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/tensor.h +77 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/worker_exception.h +38 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/conv.h +301 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/padding.h +58 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/upsampling.h +290 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/_functions.h +26 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/activation.h +875 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/adaptive.h +109 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/batchnorm.h +250 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/common.h +97 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any.h +372 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h +133 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_value.h +124 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/functional.h +105 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/moduledict.h +262 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/modulelist.h +274 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/named_any.h +94 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h +148 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h +169 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/sequential.h +390 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/conv.h +453 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/distance.h +86 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/dropout.h +190 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/embedding.h +171 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/fold.h +87 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/instancenorm.h +153 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/linear.h +214 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/loss.h +805 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/normalization.h +198 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/padding.h +378 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/pixelshuffle.h +88 -0
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader.h ADDED
@@ -0,0 +1,57 @@
+ #pragma once
+
+ #include <torch/data/dataloader/stateful.h>
+ #include <torch/data/dataloader/stateless.h>
+
+ #include <torch/csrc/utils/variadic.h>
+
+ #include <c10/util/Exception.h>
+
+ #include <cstddef>
+ #include <memory>
+ #include <type_traits>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+
+ /// Creates a `DataLoader` instance for a stateless `dataset`, a `sampler` and
+ /// some `options`.
+ template <typename Dataset, typename Sampler>
+ torch::disable_if_t<
+ Dataset::is_stateful,
+ std::unique_ptr<StatelessDataLoader<Dataset, Sampler>>>
+ make_data_loader(Dataset dataset, Sampler sampler, DataLoaderOptions options) {
+ return std::make_unique<StatelessDataLoader<Dataset, Sampler>>(
+ std::move(dataset), std::move(sampler), std::move(options));
+ }
+
+ /// Creates a `DataLoader` instance for a stateless `dataset` and some
+ /// `options`. A sampler (by default a `RandomSampler`) will be constructed from
+ /// the size of the dataset.
+ template <typename Sampler = samplers::RandomSampler, typename Dataset>
+ torch::disable_if_t<
+ Dataset::is_stateful || !std::is_constructible<Sampler, size_t>::value,
+ std::unique_ptr<StatelessDataLoader<Dataset, Sampler>>>
+ make_data_loader(
+ Dataset dataset,
+ DataLoaderOptions options = DataLoaderOptions()) {
+ const optional<size_t> size = dataset.size();
+ TORCH_CHECK(
+ size.has_value(),
+ "Expected the dataset to be sized in "
+ "order to construct the Sampler");
+ return make_data_loader(
+ std::move(dataset), Sampler(*size), std::move(options));
+ }
+
+ /// Creates a `DataLoader` for a stateful `dataset` and some `options`.
+ template <typename Dataset, typename = torch::enable_if_t<Dataset::is_stateful>>
+ std::unique_ptr<StatefulDataLoader<Dataset>> make_data_loader(
+ Dataset dataset,
+ DataLoaderOptions options = DataLoaderOptions()) {
+ return std::make_unique<StatefulDataLoader<Dataset>>(
+ std::move(dataset), std::move(options));
+ }
+ } // namespace data
+ } // namespace torch
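For reference, a minimal usage sketch of these factory functions from the C++ frontend. The MNIST dataset, the `./mnist` path and the option values are illustrative assumptions, not part of this commit:

#include <torch/torch.h>

int main() {
  // Stack<> collates each batch into a single Example of stacked tensors.
  auto dataset = torch::data::datasets::MNIST("./mnist")
                     .map(torch::data::transforms::Stack<>());

  // Stateless dataset: the default RandomSampler is built from dataset.size().
  auto loader = torch::data::make_data_loader(
      std::move(dataset),
      torch::data::DataLoaderOptions().batch_size(64).workers(2));

  for (auto& batch : *loader) {
    // batch.data and batch.target are the stacked input and label tensors.
  }
}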
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/base.h ADDED
@@ -0,0 +1,255 @@
1
+ #pragma once
2
+
3
+ #include <torch/data/dataloader_options.h>
4
+ #include <torch/data/detail/data_shuttle.h>
5
+ #include <torch/data/detail/sequencers.h>
6
+ #include <torch/data/iterator.h>
7
+ #include <torch/data/samplers/random.h>
8
+ #include <torch/data/worker_exception.h>
9
+ #include <torch/types.h>
10
+
11
+ #include <torch/csrc/utils/variadic.h>
12
+
13
+ #include <c10/util/Exception.h>
14
+ #include <c10/util/irange.h>
15
+
16
+ #include <cstddef>
17
+ #include <exception>
18
+ #include <memory>
19
+ #include <thread>
20
+ #include <type_traits>
21
+ #include <utility>
22
+ #include <vector>
23
+
24
+ namespace torch {
25
+ namespace data {
26
+ template <typename Dataset, typename Batch, typename BatchRequest>
27
+ class DataLoaderBase {
28
+ public:
29
+ using BatchType = Batch;
30
+ using BatchRequestType = BatchRequest;
31
+
32
+ /// Constructs a new DataLoader from a `dataset` to sample from, `options`
33
+ /// to configure the DataLoader with, and a `sampler` that specifies the
34
+ /// sampling strategy.
35
+ DataLoaderBase(
36
+ DataLoaderOptions options,
37
+ std::unique_ptr<Dataset> main_thread_dataset = nullptr)
38
+ : options_(std::move(options)),
39
+ main_thread_dataset_(std::move(main_thread_dataset)),
40
+ sequencer_(new_sequencer()) {}
41
+
42
+ // NOLINTNEXTLINE(bugprone-exception-escape)
43
+ virtual ~DataLoaderBase() {
44
+ join();
45
+ }
46
+
47
+ /// Returns an iterator into the DataLoader. The lifetime of the iterator is
48
+ /// bound to the DataLoader. In C++ standards language, the category of the
49
+ /// iterator is `OutputIterator`. See
50
+ /// https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this
51
+ /// means. In short: you may increment the iterator and dereference it, but
52
+ /// cannot go back, or step forward more than one position at a time. When the
53
+ /// DataLoader is exhausted, it will compare equal with the special
54
+ /// "sentinel" iterator returned by `DataLoader::end()`. Most of the time, you
55
+ /// should only use range-for loops to loop over the DataLoader, but
56
+ /// standard algorithms like `std::copy(dataloader.begin(), dataloader.end(),
57
+ /// output_iterator)` are supported too.
58
+ Iterator<Batch> begin() {
59
+ TORCH_CHECK(
60
+ shuttle_.in_flight_jobs() == 0,
61
+ "Attempted to get a new DataLoader iterator "
62
+ "while another iterator is not yet exhausted");
63
+ reset();
64
+ return Iterator<Batch>(std::make_unique<detail::ValidIterator<Batch>>(
65
+ [this] { return this->next(); }));
66
+ }
67
+
68
+ /// Returns a special "sentinel" iterator that compares equal with a
69
+ /// non-sentinel iterator once the DataLoader is exhausted.
70
+ Iterator<Batch> end() {
71
+ return Iterator<Batch>(std::make_unique<detail::SentinelIterator<Batch>>());
72
+ }
73
+
74
+ /// Joins the DataLoader's worker threads and drains internal queues.
75
+ /// This function may only be invoked from the main thread (in which the
76
+ /// DataLoader lives).
77
+ void join() {
78
+ if (joined_) {
79
+ return;
80
+ }
81
+ shuttle_.drain();
82
+ // Send one 'quit' message per worker. Since a worker dies (exits its
83
+ // thread) after receiving this message, each `QuitWorker()` message will be
84
+ // read by exactly one worker.
85
+ for (const auto w : c10::irange(options_.workers)) {
86
+ (void)w; // Suppress unused variable warning
87
+ push_job(QuitWorker());
88
+ }
89
+ for (auto& worker : workers_) {
90
+ worker.join();
91
+ }
92
+ joined_ = true;
93
+ }
94
+
95
+ /// Returns the options with which the DataLoader was configured.
96
+ const FullDataLoaderOptions& options() const noexcept {
97
+ return options_;
98
+ }
99
+
100
+ protected:
101
+ /// Simple mix-in to give something a sequence number.
102
+ struct Sequenced {
103
+ Sequenced() = default;
104
+ Sequenced(size_t sqn) : sequence_number(sqn) {}
105
+ size_t sequence_number;
106
+ };
107
+
108
+ struct QuitWorker {};
109
+
110
+ /// A `Job` is either a `BatchRequest` (new indices to fetch data at) or a
111
+ /// `QuitWorker` object, to indicate the worker should shut down.
112
+ struct Job : Sequenced {
113
+ Job() = default;
114
+ Job(QuitWorker q, size_t sqn) : Sequenced(sqn), quit(q) {}
115
+ Job(BatchRequest&& i, size_t sqn)
116
+ : Sequenced(sqn), batch_request(std::move(i)) {}
117
+ optional<QuitWorker> quit;
118
+ optional<BatchRequest> batch_request;
119
+ };
120
+
121
+ /// The finished result of a job.
122
+ struct Result : Sequenced {
123
+ Result() = default;
124
+ Result(optional<Batch>&& b, size_t sqn)
125
+ : Sequenced(sqn), batch(std::move(b)) {}
126
+ Result(std::exception_ptr exception, size_t sqn)
127
+ : Sequenced(sqn), exception(std::move(exception)) {}
128
+ optional<Batch> batch;
129
+ std::exception_ptr exception;
130
+ };
131
+
132
+ /// Subclass hook for getting the next batch request. The stateless case will
133
+ /// ask the sampler for a new batch request (e.g. a vector of indices), while
134
+ /// the stateful one will simply return the batch size.
135
+ virtual optional<BatchRequestType> get_batch_request() = 0;
136
+
137
+ /// Resets the internal state of the DataLoader, optionally pre-fetching
138
+ /// new jobs.
139
+ virtual void reset() {
140
+ shuttle_.drain();
141
+ sequence_number_ = 0;
142
+ sequencer_ = new_sequencer();
143
+ prefetch();
144
+ }
145
+
146
+ /// Schedules `requested_jobs` many new batches to be fetched. The actual
147
+ /// number of jobs scheduled may be less if the DataLoader exhausts.
148
+ void prefetch(size_t requested_jobs) {
149
+ for (const auto r : c10::irange(requested_jobs)) {
150
+ (void)r; // Suppress unused variable
151
+ if (auto batch_request = get_batch_request()) {
152
+ this->push_job(std::move(*batch_request));
153
+ } else {
154
+ break;
155
+ }
156
+ }
157
+ }
158
+
159
+ /// Schedules the maximum number of jobs (based on the `max_jobs` option).
160
+ void prefetch() {
161
+ prefetch(options_.max_jobs);
162
+ }
163
+
164
+ /// Returns the next batch of data, or an empty `optional` if the DataLoader
165
+ /// is exhausted. This operation will block until a batch is available if one
166
+ /// is still expected.
167
+ optional<BatchType> next() {
168
+ if (options_.workers > 0) {
169
+ while (optional<Result> result = this->pop_result()) {
170
+ if (result->exception) {
171
+ throw WorkerException(result->exception);
172
+ } else if (result->batch) {
173
+ prefetch(1);
174
+ return std::move(result->batch);
175
+ }
176
+ }
177
+ } else if (auto batch_request = get_batch_request()) {
178
+ return this->main_thread_dataset_->get_batch(std::move(*batch_request));
179
+ }
180
+ return nullopt;
181
+ }
182
+
183
+ /// The function that worker threads run.
184
+ void worker_thread(Dataset& dataset) {
185
+ while (true) {
186
+ auto job = shuttle_.pop_job();
187
+ if (job.quit) {
188
+ break;
189
+ }
190
+ try {
191
+ auto batch = dataset.get_batch(std::move(*job.batch_request));
192
+ shuttle_.push_result({std::move(batch), job.sequence_number});
193
+ } catch (...) {
194
+ shuttle_.push_result({std::current_exception(), job.sequence_number});
195
+ }
196
+ }
197
+ }
198
+
199
+ /// Convenience method that calls `shuttle_.push_job()` with the next sequence
200
+ /// number.
201
+ template <typename T>
202
+ void push_job(T value) {
203
+ shuttle_.push_job({std::move(value), sequence_number_++});
204
+ }
205
+
206
+ /// Convenience method that gets the next result from the sequencer.
207
+ optional<Result> pop_result() {
208
+ return sequencer_->next(
209
+ [this] { return this->shuttle_.pop_result(this->options_.timeout); });
210
+ }
211
+
212
+ /// Convenience method that creates a new sequencer based on the
213
+ /// `enforce_ordering` option.
214
+ std::unique_ptr<detail::sequencers::Sequencer<Result>> new_sequencer() {
215
+ if (options_.enforce_ordering) {
216
+ return std::make_unique<detail::sequencers::OrderedSequencer<Result>>(
217
+ options_.max_jobs);
218
+ }
219
+ return std::make_unique<detail::sequencers::NoSequencer<Result>>();
220
+ }
221
+
222
+ /// The options the DataLoader was configured with.
223
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
224
+ const FullDataLoaderOptions options_;
225
+
226
+ /// The dataset for the main thread, only has a value if the number of
227
+ /// worker threads was configured as zero, meaning the main thread has to do
228
+ /// all the work (synchronously). NOTE: Really want this to be on the heap
229
+ /// when empty, therefore `unique_ptr` and not `optional`.
230
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
231
+ std::unique_ptr<Dataset> main_thread_dataset_;
232
+
233
+ /// The sequence number for the *next* batch to be retrieved from the
234
+ /// dataset.
235
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
236
+ size_t sequence_number_ = 0;
237
+
238
+ /// The worker threads, running the `worker_thread()` method.
239
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
240
+ std::vector<std::thread> workers_;
241
+
242
+ /// The `DataShuttle` which takes care of the life cycle of a job.
243
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
244
+ detail::DataShuttle<Job, Result> shuttle_;
245
+
246
+ /// The `Sequencer`, which handles optional ordering of batches.
247
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
248
+ std::unique_ptr<detail::sequencers::Sequencer<Result>> sequencer_;
249
+
250
+ /// True if the DataLoader has joined its worker threads.
251
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
252
+ bool joined_ = false;
253
+ };
254
+ } // namespace data
255
+ } // namespace torch
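To illustrate the single-pass iterator contract described above, a short sketch (here `loader` stands for any data loader built with `make_data_loader`, e.g. the MNIST example earlier):

// Range-for is the usual way to drain a DataLoader.
for (auto& batch : *loader) {
  // consume batch
}

// Equivalent explicit form: forward-only, one element at a time.
for (auto it = loader->begin(); it != loader->end(); ++it) {
  auto& batch = *it;
}

// Calling begin() again while a previous iterator is not yet exhausted trips
// the TORCH_CHECK at the top of DataLoaderBase::begin().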
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateful.h ADDED
@@ -0,0 +1,65 @@
+ #pragma once
+
+ #include <c10/util/irange.h>
+ #include <torch/data/dataloader/base.h>
+
+ #include <cstddef>
+ #include <thread>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+
+ /// A dataloader for stateful datasets.
+ ///
+ /// A dataloader for stateful datasets differs from one for stateless
+ /// datasets in that the dataset is shared among worker threads, and that
+ /// this dataset is itself responsible for producing batches rather than
+ /// depending on a sampler. The statefulness here actually refers to the
+ /// dataset. The StatefulDataLoader simply alters the data loading algorithm to
+ /// accommodate the stateful, shared nature of the dataset. Note that the
+ /// dataset must be thread safe if more than one worker thread is used.
+ ///
+ /// A stateful dataloader is created by calling `make_data_loader` with a
+ /// stateful dataset.
+ template <typename Dataset>
+ class StatefulDataLoader : public DataLoaderBase<
+ Dataset,
+ typename Dataset::BatchType::value_type,
+ typename Dataset::BatchRequestType> {
+ public:
+ using super = DataLoaderBase<
+ Dataset,
+ typename Dataset::BatchType::value_type,
+ typename Dataset::BatchRequestType>;
+ using typename super::BatchRequestType;
+
+ /// Constructs the `StatefulDataLoader` from a `dataset` and some `options`.
+ StatefulDataLoader(Dataset dataset, DataLoaderOptions options)
+ : super(
+ std::move(options),
+ std::make_unique<Dataset>(std::move(dataset))) {
+ for (const auto w : c10::irange(this->options_.workers)) {
+ // As opposed to the stateless case, here all worker threads access the
+ // same underlying dataset.
+ this->workers_.emplace_back(
+ [this] { this->worker_thread(*this->main_thread_dataset_); });
+ }
+ }
+
+ private:
+ /// Resets the internal state of the dataloader and the dataset.
+ void reset() override {
+ this->main_thread_dataset_->reset();
+ // Call the base class method last because it calls `prefetch()`
+ super::reset();
+ }
+
+ /// For stateful datasets, the batch request is always the batch size. The
+ /// dataset is responsible for determining what goes into the batch next.
+ optional<BatchRequestType> get_batch_request() override {
+ return this->options_.batch_size;
+ }
+ };
+ } // namespace data
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader/stateless.h ADDED
@@ -0,0 +1,82 @@
+ #pragma once
+
+ #include <torch/data/dataloader/base.h>
+ #include <torch/data/worker_exception.h>
+
+ #include <c10/util/Exception.h>
+ #include <c10/util/irange.h>
+
+ #include <cstddef>
+ #include <thread>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+
+ /// A dataloader for stateless datasets.
+ ///
+ /// This dataloader follows the traditional PyTorch dataloader design, whereby a
+ /// (possibly) stateful sampler produces *batch requests* for a stateless
+ /// dataset, which acts as a simple mapping from batch request to batch. The batch
+ /// request will often be an array of indices, and if the dataset is a simple
+ /// image dataset, the dataset would produce the images at those indices.
+ template <typename Dataset, typename Sampler>
+ class StatelessDataLoader : public DataLoaderBase<
+ Dataset,
+ typename Dataset::BatchType,
+ typename Sampler::BatchRequestType> {
+ public:
+ using super = DataLoaderBase<
+ Dataset,
+ typename Dataset::BatchType,
+ typename Sampler::BatchRequestType>;
+ using typename super::BatchRequestType;
+
+ /// Constructs the `StatelessDataLoader` from a `dataset`, a `sampler` and
+ /// some `options`.
+ StatelessDataLoader(
+ Dataset dataset,
+ Sampler sampler,
+ DataLoaderOptions options)
+ : super(std::move(options)), sampler_(std::move(sampler)) {
+ for (const auto w : c10::irange(this->options_.workers)) {
+ // Here we copy the dataset into the worker thread closure. Each worker
+ // has its own copy of the dataset. This means the dataset must be
+ // trivially copyable, or else we don't expect more than one worker to
+ // be in use.
+ (void)w; // Suppress unused variable warning
+ this->workers_.emplace_back(
+ [this, dataset]() mutable { this->worker_thread(dataset); });
+ }
+ if (this->options_.workers == 0) {
+ this->main_thread_dataset_ =
+ std::make_unique<Dataset>(std::move(dataset));
+ }
+ }
+
+ private:
+ /// Resets the internal state of the dataloader and the sampler.
+ void reset() override {
+ sampler_.reset();
+ // Call the base class method last because it calls `prefetch()`
+ super::reset();
+ }
+
+ /// Queries the sampler for the next batch request (possibly progressing its
+ /// internal state).
+ optional<BatchRequestType> get_batch_request() override {
+ auto indices = sampler_.next(this->options_.batch_size);
+ if (!indices ||
+ (indices->size() < this->options_.batch_size &&
+ this->options_.drop_last)) {
+ return nullopt;
+ }
+ AT_ASSERT(indices->size() > 0);
+ return indices;
+ }
+
+ /// The `Sampler` used to produce batch requests.
+ Sampler sampler_;
+ };
+ } // namespace data
+ } // namespace torch
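A sketch of pairing a stateless dataset with an explicit sampler instead of the default `RandomSampler` (assumes `dataset` is a sized, stateless dataset as in the earlier example):

// SequentialSampler is constructible from a size_t, so the single-argument
// make_data_loader overload can build it from dataset.size().
auto sequential_loader =
    torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
        std::move(dataset),
        torch::data::DataLoaderOptions().batch_size(8).workers(0));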
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader_options.h ADDED
@@ -0,0 +1,65 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/types.h>
5
+
6
+ #include <chrono>
7
+ #include <cstddef>
8
+
9
+ namespace torch {
10
+ namespace data {
11
+
12
+ /// Options to configure a `DataLoader`.
13
+ struct DataLoaderOptions {
14
+ DataLoaderOptions() = default;
15
+ /* implicit */ DataLoaderOptions(size_t batch_size)
16
+ : batch_size_(batch_size) {}
17
+
18
+ /// The size of each batch to fetch.
19
+ TORCH_ARG(size_t, batch_size) = 1;
20
+
21
+ /// The number of worker threads to launch. If zero, the main thread will
22
+ /// synchronously perform the data loading.
23
+ TORCH_ARG(size_t, workers) = 0;
24
+
25
+ /// The maximum number of jobs to enqueue for fetching by worker threads.
26
+ /// Defaults to two times the number of worker threads.
27
+ TORCH_ARG(optional<size_t>, max_jobs);
28
+
29
+ /// An optional limit on the time to wait for the next batch.
30
+ TORCH_ARG(optional<std::chrono::milliseconds>, timeout);
31
+
32
+ /// Whether to enforce ordering of batches when multiple are loaded
33
+ /// asynchronously by worker threads. Set to `false` for better performance if
34
+ /// you do not care about determinism.
35
+ TORCH_ARG(bool, enforce_ordering) = true;
36
+
37
+ /// Whether to omit the last batch if it contains less than `batch_size`
38
+ /// examples.
39
+ TORCH_ARG(bool, drop_last) = false;
40
+ };
41
+
42
+ /// Like `DataLoaderOptions`, but without any unconfigured state.
43
+ /// `DataLoaderOptions` has some options that depend on other options
44
+ /// (`max_jobs` => `2 * workers`). In the spirit of properly using the C++ type
45
+ /// system, `DataLoaderOptions` allows only setting values. To access values,
46
+ /// you must create a `FullDataLoaderOptions` from a `DataLoaderOptions`
47
+ /// instance, which will do any necessary coalescing.
48
+ struct FullDataLoaderOptions {
49
+ explicit FullDataLoaderOptions(DataLoaderOptions options)
50
+ : batch_size(options.batch_size()),
51
+ workers(options.workers()),
52
+ max_jobs(options.max_jobs().value_or(2 * workers)),
53
+ timeout(options.timeout()),
54
+ enforce_ordering(options.enforce_ordering()),
55
+ drop_last(options.drop_last()) {}
56
+
57
+ size_t batch_size;
58
+ size_t workers;
59
+ size_t max_jobs;
60
+ optional<std::chrono::milliseconds> timeout;
61
+ bool enforce_ordering;
62
+ bool drop_last;
63
+ };
64
+ } // namespace data
65
+ } // namespace torch
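A short sketch of how the `TORCH_ARG`-generated setters chain and how `FullDataLoaderOptions` coalesces unset fields (the values are illustrative):

torch::data::DataLoaderOptions options;
options.batch_size(32).workers(4).enforce_ordering(false).drop_last(true);

// FullDataLoaderOptions resolves every option to a concrete value.
torch::data::FullDataLoaderOptions full(options);
// full.max_jobs == 8 here: max_jobs was left unset, so it falls back to
// 2 * workers as documented above.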
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets.h ADDED
@@ -0,0 +1,9 @@
+ #pragma once
+
+ #include <torch/data/datasets/base.h>
+ #include <torch/data/datasets/chunk.h>
+ #include <torch/data/datasets/map.h>
+ #include <torch/data/datasets/mnist.h>
+ #include <torch/data/datasets/shared.h>
+ #include <torch/data/datasets/stateful.h>
+ #include <torch/data/datasets/tensor.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/chunk.h ADDED
@@ -0,0 +1,529 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/arg.h>
5
+ #include <torch/data/datasets/stateful.h>
6
+ #include <torch/data/samplers.h>
7
+ #include <queue>
8
+ #include <thread>
9
+
10
+ #include <torch/serialize.h>
11
+
12
+ namespace torch {
13
+ namespace data {
14
+ namespace datasets {
15
+
16
+ /// Interface for chunk reader, which performs data chunking and reading of
17
+ /// entire chunks.
18
+ ///
19
+ /// A chunk could be an entire file, such as an audio data file or an image,
20
+ /// or part of a file in the case of a large text-file split based on seek
21
+ /// positions.
22
+ template <
23
+ typename ExampleType_,
24
+ typename ChunkType_ = std::vector<ExampleType_>>
25
+ class ChunkDataReader {
26
+ public:
27
+ virtual ~ChunkDataReader() = default;
28
+
29
+ using ChunkType = ChunkType_;
30
+ using ExampleType = ExampleType_;
31
+
32
+ /// Read an entire chunk.
33
+ virtual ChunkType read_chunk(size_t chunk_index) = 0;
34
+
35
+ /// Returns the number of chunks available in this reader.
36
+ virtual size_t chunk_count() = 0;
37
+
38
+ /// This will clear any internal state associated with this reader.
39
+ virtual void reset() = 0;
40
+ };
41
+
42
+ namespace detail {
43
+ /// BatchDataBuffer manages a queue of UnwrappedBatchData. After a new chunk is
+ /// loaded, BatchDataBuffer splits it into small batches and pushes them into the
+ /// queue. When get_batch is called from the data loader, it pops cached batches
+ /// and returns them. If the cache is empty, it either waits for more chunks to be
+ /// loaded or returns null if all chunks are loaded.
48
+ template <
49
+ typename UnwrappedBatch,
50
+ typename ExampleSampler = samplers::RandomSampler>
51
+ class BatchDataBuffer {
52
+ public:
53
+ using UnwrappedBatchType = UnwrappedBatch;
54
+ using BatchType = torch::optional<UnwrappedBatchType>;
55
+ using BatchRequestType = typename ExampleSampler::BatchRequestType;
56
+
57
+ BatchDataBuffer(
58
+ size_t batch_size,
59
+ ExampleSampler& example_sampler,
60
+ size_t queue_capacity)
61
+ : batch_size_(batch_size),
62
+ example_sampler_(example_sampler),
63
+ queue_capacity_(queue_capacity) {}
64
+
65
+ /// Return batch data from the queue. Called from the ChunkDataset main
66
+ /// thread.
67
+ BatchType get_batch() {
68
+ std::unique_lock<std::mutex> lock(queue_mutex_);
69
+ cv_read_.wait(lock, [this] {
70
+ // wait till there is available data in the queue or if all chunks are
71
+ // loaded (i.e. the dataset is exhausted for this epoch)
72
+ return (
73
+ this->total_example_count_in_queue_ >= batch_size_ || this->stop_);
74
+ });
75
+ if (batch_queue_.empty()) {
76
+ AT_ASSERT(stop_);
77
+ // All batches have been retrieved. Return an empty batch.
78
+ return nullopt;
79
+ }
80
+
81
+ UnwrappedBatchData batch = std::move(batch_queue_.front());
82
+ batch_queue_.pop();
83
+ if (batch.exception) {
84
+ throw WorkerException(batch.exception);
85
+ }
86
+
87
+ total_example_count_in_queue_ -= batch.batch_data.size();
88
+ lock.unlock();
89
+ cv_write_.notify_all();
90
+
91
+ return batch.batch_data;
92
+ }
93
+
94
+ /// Push preloaded chunks to batch queue. Called from the ChunkDataset worker
95
+ /// threads.
96
+ void add_chunk_data(UnwrappedBatchType data) {
97
+ std::unique_lock<std::mutex> lock(queue_mutex_);
98
+ cv_write_.wait(lock, [this] {
99
+ // stop loading if we have preloaded enough data.
100
+ return this->total_example_count_in_queue_ < this->queue_capacity_ ||
101
+ this->stop_;
102
+ });
103
+ if (stop_) {
104
+ // When stop_ is true, it means no further chunk loading is necessary.
105
+ // Return without any further processing.
106
+ return;
107
+ }
108
+
109
+ auto data_size = data.size();
110
+ auto remaining_size = data_size;
111
+ example_sampler_.reset(data_size);
112
+
113
+ auto fill_batch = [&](size_t example_count, UnwrappedBatchType& batch) {
114
+ auto batch_example_indices = this->example_sampler_.next(example_count);
115
+ AT_ASSERT(
116
+ batch_example_indices &&
117
+ batch_example_indices.value().size() == example_count);
118
+ BatchRequestType& indices = batch_example_indices.value();
119
+ for (size_t i : indices) {
120
+ TORCH_CHECK(i < data_size, "Index out of range");
121
+ batch.emplace_back(std::move(data[i]));
122
+ }
123
+ remaining_size -= example_count;
124
+ };
125
+
126
+ if (!batch_queue_.empty()) {
127
+ // if the queue has existing data, and the last batch doesn't have enough
128
+ // examples to fill a batch_size batch, add more example to this batch
129
+ // first.
130
+ auto& batch = batch_queue_.back();
131
+ size_t current_count = batch.batch_data.size();
132
+ if (current_count < batch_size_) {
133
+ auto example_count =
134
+ std::min(remaining_size, batch_size_ - current_count);
135
+ fill_batch(example_count, batch.batch_data);
136
+ }
137
+ }
138
+
139
+ // If we still have data remaining after filling the last pushed batch, add
140
+ // them to the queue too.
141
+ // NOLINTNEXTLINE(bugprone-infinite-loop)
142
+ while (remaining_size > 0) {
143
+ UnwrappedBatchType current_batch;
144
+
145
+ // Allocate the batch memory ahead of time.
146
+ current_batch.reserve(batch_size_);
147
+
148
+ auto example_count = std::min(remaining_size, batch_size_);
149
+ fill_batch(example_count, current_batch);
150
+ batch_queue_.emplace(std::move(current_batch));
151
+ }
152
+ total_example_count_in_queue_ += data_size;
153
+ lock.unlock();
154
+ cv_read_.notify_all();
155
+ }
156
+
157
+ /// Push exceptions thrown during preloading into batch queue. Called from
158
+ /// the ChunkDataset worker threads.
159
+ void add_chunk_data(std::exception_ptr e_ptr) {
160
+ std::unique_lock<std::mutex> lock(queue_mutex_);
161
+ cv_write_.wait(lock, [this] {
162
+ // stop loading if we have preloaded enough data.
163
+ return (
164
+ this->total_example_count_in_queue_ < this->queue_capacity_ ||
165
+ this->stop_);
166
+ });
167
+ if (stop_) {
168
+ // When stop_ is true, it means the current thread needs to be torn down,
169
+ // the batch buffer will be discarded, so no need to enqueue any new
170
+ // exceptions.
171
+ return;
172
+ }
173
+
174
+ batch_queue_.emplace(e_ptr);
175
+ lock.unlock();
176
+ cv_read_.notify_all();
177
+ }
178
+
179
+ void stop() {
180
+ {
181
+ // Hold the lock before changing stop_ to prevent a race condition which
182
+ // can cause a deadlock. To be more specific, conditional variable
183
+ // cv_write_ waits on predicate stop_ in add_chunk_data(). The wait
184
+ // happens in two steps: 1) while still holding the lock, check if
185
+ // predicate is true; 2) if it is true, proceeds, otherwise, release the
186
+ // lock and wait until notified. Without holding a lock, cv_write_'s
187
+ // notification can happen in between steps 1) and 2). In that case, since
+ // cv_write_ is not yet in waiting status, the notification is lost and
+ // cv_write_ will sleep forever. By taking a lock before changing the
+ // predicate stop_, it is ensured that updating and evaluating stop_ always
+ // happen in a synchronized way.
192
+ std::lock_guard<std::mutex> lock(queue_mutex_);
193
+ stop_ = true;
194
+ }
195
+
196
+ // notify all writers, wake them from wait to exit current method.
197
+ cv_write_.notify_all();
198
+ // notify all readers too.
199
+ cv_read_.notify_all();
200
+ }
201
+ /// The batch size is needed to create batches from the chunk data. Similar to
+ /// the regular dataloader, where batches are created with prefetches,
+ /// BatchDataBuffer performs the batch creation using the provided batch size.
204
+ size_t batch_size_ = 0;
205
+
206
+ /// count of total examples stored in the queue
207
+ size_t total_example_count_in_queue_ = 0;
208
+
209
+ /// struct that contains a raw unwrapped batch unit. An unwrapped batch unit
210
+ /// is the raw data without 'optional' wrapper. It can be a collection of
211
+ /// images, utterances, etc.
212
+ struct UnwrappedBatchData {
213
+ explicit UnwrappedBatchData(UnwrappedBatchType data)
214
+ : batch_data(std::move(data)) {}
215
+
216
+ // NOLINTNEXTLINE(modernize-pass-by-value)
217
+ explicit UnwrappedBatchData(std::exception_ptr e) : exception(e) {}
218
+
219
+ /// batch data to return
220
+ UnwrappedBatchType batch_data;
221
+
222
+ /// exception pointer which captures any abnormal exceptions while creating
223
+ /// the batch.
224
+ std::exception_ptr exception;
225
+ };
226
+
227
+ /// local cache to store example batches from loaded chunk
228
+ std::queue<UnwrappedBatchData> batch_queue_;
229
+
230
+ // sync batch_queue_ update.
231
+ std::mutex queue_mutex_;
232
+
233
+ std::condition_variable cv_read_;
234
+ std::condition_variable cv_write_;
235
+
236
+ ExampleSampler& example_sampler_;
237
+
238
+ // configurable maximum number of elements the queue can hold at one time.
239
+ size_t queue_capacity_;
240
+
241
+ // When set to true, it wakes the writer threads from their wait so they exit
+ // the current function call. This is needed when ChunkDataset::reset() is called
+ // while the previous epoch is not yet exhausted. When ChunkDataset is waiting for
+ // its preloader to finish previous work before tearing down the thread, the
+ // preloader could still be waiting on the condition variable, which would cause
+ // the program to hang. This boolean is used to break this waiting condition.
247
+ bool stop_ = false;
248
+ };
249
+ } // namespace detail
250
+
251
+ /// Options to configure a `ChunkDataset`.
252
+ struct ChunkDatasetOptions {
253
+ ChunkDatasetOptions() = delete;
254
+ ChunkDatasetOptions(
255
+ size_t preloader_count,
256
+ size_t batch_size,
257
+ size_t cache_size = 2048,
258
+ size_t cross_chunk_shuffle_count = 1)
259
+ : preloader_count_(preloader_count),
260
+ batch_size_(batch_size),
261
+ cache_size_(cache_size),
262
+ cross_chunk_shuffle_count_(cross_chunk_shuffle_count) {
263
+ TORCH_CHECK(
264
+ preloader_count_ > 0,
265
+ "Preloader count is 0. At least one preloader needs to be specified.");
266
+ TORCH_CHECK(
267
+ batch_size_ > 0,
268
+ "Batch size is 0. A positive batch size needs to be specified.");
269
+ TORCH_CHECK(
270
+ cache_size_ > 0,
271
+ "Cache size is 0. A positive cache size needs to be specified.");
272
+ TORCH_CHECK(
273
+ cache_size_ >= batch_size_,
274
+ "Cache size is less than batch size. Cache needs to be large enough to "
275
+ "hold at least one batch.");
276
+ TORCH_CHECK(
277
+ cross_chunk_shuffle_count_ > 0,
278
+ "cross_chunk_shuffle_count needs to be greater than 0.");
279
+ }
280
+
281
+ /// The number of worker thread to preload chunk data.
282
+ TORCH_ARG(size_t, preloader_count);
283
+
284
+ /// The size of each batch.
285
+ TORCH_ARG(size_t, batch_size);
286
+
287
+ /// The capacity of the queue for batch caching.
288
+ TORCH_ARG(size_t, cache_size) = 2048;
289
+
290
+ // The number of chunks on which to perform cross-chunk shuffling. Defaults to 1, meaning
291
+ // no cross-chunk shuffling. When it is equal to n (n > 1), n random
292
+ // chunks will be loaded at once and example shuffling will be performed
293
+ // across all those n chunks.
294
+ // Note: Usually the default config (1 chunk shuffle + example shuffle) is
295
+ // good enough to generate random distributed data. Use this parameter only if
296
+ // you know cross-shuffle is needed in your case. Also there is a performance
297
+ // penalty when this value is greater than 1, as we need to do extra merge
298
+ // between multiple chunks before performing example sampling.
299
+ TORCH_ARG(size_t, cross_chunk_shuffle_count) = 1;
300
+ };
301
+
302
+ /// A stateful dataset that supports hierarchical sampling and prefetching of
+ /// entire chunks.
+ ///
+ /// Unlike a regular dataset, a chunk dataset requires two samplers to operate and
+ /// keeps an internal state. The `ChunkSampler` selects which chunk to load next,
+ /// while the `ExampleSampler` determines the order of Examples that are returned
+ /// in each `get_batch` call. The hierarchical sampling approach used here is
+ /// inspired by this paper: http://martin.zinkevich.org/publications/nips2010.pdf
310
+ template <
311
+ typename ChunkReader,
312
+ typename ChunkSampler = samplers::RandomSampler,
313
+ typename ExampleSampler = samplers::RandomSampler>
314
+ class ChunkDataset final
315
+ : public StatefulDataset<
316
+ ChunkDataset<ChunkReader, ChunkSampler, ExampleSampler>,
317
+ typename ChunkReader::BatchType,
318
+ size_t> {
319
+ public:
320
+ using BatchType = torch::optional<typename ChunkReader::BatchType>;
321
+ using UnwrappedBatchType = typename ChunkReader::BatchType;
322
+ using BatchRequestType = size_t;
323
+ using ChunkSamplerType = ChunkSampler;
324
+ using ExampleSamplerType = ExampleSampler;
325
+
326
+ ChunkDataset(
327
+ ChunkReader chunk_reader,
328
+ ChunkSampler chunk_sampler,
329
+ ExampleSampler example_sampler,
330
+ ChunkDatasetOptions options,
331
+ std::function<void(UnwrappedBatchType&)> preprocessing_policy =
332
+ std::function<void(UnwrappedBatchType&)>())
333
+ : chunk_reader_(std::move(chunk_reader)),
334
+ chunk_sampler_(std::move(chunk_sampler)),
335
+ example_sampler_(std::move(example_sampler)),
336
+ options_(std::move(options)),
337
+ preprocessing_policy_(std::move(preprocessing_policy)),
338
+ quit_worker_(false),
339
+ running_preloaders_(0),
340
+ load_checkpoint_(false) {}
341
+
342
+ ~ChunkDataset() override {
343
+ // stop batch buffer first.
344
+ if (batch_buffer_) {
345
+ batch_buffer_->stop();
346
+ }
347
+ free_workers();
348
+ }
349
+
350
+ /// Default get_batch method of BatchDataset. This method returns
351
+ /// Example batches created from the preloaded chunks. The implementation
352
+ /// is dataset agnostic and does not need overriding in different chunk
353
+ /// datasets.
354
+ BatchType get_batch(size_t batch_size) override {
355
+ TORCH_CHECK(
356
+ batch_buffer_ != nullptr,
357
+ "Dataset needs to call reset() before calling get_batch().");
358
+
359
+ TORCH_CHECK(
360
+ batch_size == options_.batch_size(),
361
+ "The requested batch size does not match with the initialized batch size.\n"
362
+ " The requested batch size is ",
363
+ batch_size,
364
+ ", while the dataset is created with batch size equal to ",
365
+ options_.batch_size());
366
+ return batch_buffer_->get_batch();
367
+ }
368
+
369
+ /// Helper method around get_batch as `batch_size` is not strictly necessary
370
+ BatchType get_batch() {
371
+ return get_batch(options_.batch_size());
372
+ }
373
+
374
+ /// This will clear any internal state and starts the internal prefetching
375
+ /// mechanism for the chunk dataset.
376
+ void reset() override {
377
+ // We need this to support partial data reads via dataloader iterator.
378
+ if (batch_buffer_) {
379
+ batch_buffer_->stop();
380
+ }
381
+ // free workers from previous reset if there is any.
382
+ free_workers();
383
+ preload_threads_.clear();
384
+
385
+ if (!load_checkpoint_) {
386
+ chunk_reader_.reset();
387
+ chunk_sampler_.reset(chunk_reader_.chunk_count());
388
+ load_checkpoint_ = false;
389
+ }
390
+
391
+ // Throw out any existing cached batch in the buffer and re-creates a new
392
+ // chunk buffer.
393
+ batch_buffer_ = std::make_unique<
394
+ detail::BatchDataBuffer<UnwrappedBatchType, ExampleSamplerType>>(
395
+ options_.batch_size(), example_sampler_, options_.cache_size());
396
+
397
+ // create new workers for this new epoch.
398
+ quit_worker_ = false;
399
+
400
+ AT_ASSERT(running_preloaders_ == 0);
401
+ running_preloaders_ = options_.preloader_count();
402
+ for (const auto i : c10::irange(options_.preloader_count())) {
403
+ preload_threads_.emplace_back([this, i]() { this->preloader(i); });
404
+ }
405
+ }
406
+
407
+ /// size is not used for chunk dataset.
408
+ optional<size_t> size() const override {
409
+ return torch::nullopt;
410
+ }
411
+
412
+ // Provides a reference to the chunk sampler. Used mainly in distributed data
413
+ // loading to set the epoch number for the sampler.
414
+ ChunkSamplerType& chunk_sampler() {
415
+ return chunk_sampler_;
416
+ }
417
+
418
+ void save(serialize::OutputArchive& archive) const override {
419
+ std::lock_guard<std::mutex> lock(chunk_index_guard_);
420
+ chunk_sampler_.save(archive);
421
+ }
422
+
423
+ void load(serialize::InputArchive& archive) override {
424
+ std::lock_guard<std::mutex> lock(chunk_index_guard_);
425
+ chunk_sampler_.load(archive);
426
+ load_checkpoint_ = true;
427
+ }
428
+
429
+ private:
430
+ /// running on worker thread to preload chunk data.
431
+ void preloader(size_t id) {
432
+ while (!quit_worker_.load()) {
433
+ try {
434
+ std::vector<size_t> chunk_idx;
435
+ {
436
+ std::lock_guard<std::mutex> lock(chunk_index_guard_);
437
+ if (auto chunk_sampler_result = chunk_sampler_.next(
438
+ this->options_.cross_chunk_shuffle_count())) {
439
+ chunk_idx = chunk_sampler_result.value();
440
+ } else {
441
+ break;
442
+ }
443
+ }
444
+ UnwrappedBatchType data = chunk_reader_.read_chunk(chunk_idx[0]);
445
+ for (const auto i : c10::irange(1, chunk_idx.size())) {
446
+ auto chunk_data = chunk_reader_.read_chunk(chunk_idx[i]);
447
+ std::move(
448
+ chunk_data.begin(), chunk_data.end(), std::back_inserter(data));
449
+ }
450
+ if (preprocessing_policy_) {
451
+ preprocessing_policy_(data);
452
+ }
453
+ if (!data.empty()) { // skip empty chunks.
454
+ batch_buffer_->add_chunk_data(std::move(data));
455
+ }
456
+ } catch (...) {
457
+ batch_buffer_->add_chunk_data(std::current_exception());
458
+ }
459
+ }
460
+ AT_ASSERT(running_preloaders_.load() > 0);
461
+ --running_preloaders_;
462
+ if (running_preloaders_.load() == 0) {
463
+ // all preloaders are completed, so we can notify the batch_buffer.
464
+ batch_buffer_->stop();
465
+ }
466
+ }
467
+
468
+ /// Block the current thread until the workers finish execution and exit.
469
+ void free_workers() {
470
+ if (!quit_worker_.load()) {
471
+ quit_worker_ = true;
472
+ for (auto& worker_thread : preload_threads_) {
473
+ worker_thread.join();
474
+ }
475
+ }
476
+ }
477
+
478
+ private:
479
+ // Templated class that defines what is a chunk and how to read chunk data.
480
+ // When a chunk is returned by chunk_reader_, ChunkDataset splits it into
481
+ // batches and caches them in batch_buffer_.
482
+ ChunkReader chunk_reader_;
483
+
484
+ // chunk sampler to shuffle different chunks
485
+ ChunkSamplerType chunk_sampler_;
486
+
487
+ // example sampler to shuffle examples in a specific chunk
488
+ ExampleSamplerType example_sampler_;
489
+
490
+ // batch data buffer which holds chunk data from preloading thread.
491
+ std::shared_ptr<
492
+ detail::BatchDataBuffer<UnwrappedBatchType, ExampleSamplerType>>
493
+ batch_buffer_;
494
+
495
+ // worker thread pool
496
+ std::vector<std::thread> preload_threads_;
497
+
498
+ /// The options the Dataset was configured with.
499
+ const ChunkDatasetOptions options_;
500
+
501
+ // function pointer wrapper to apply custom processing over chunk data. This
502
+ // is considered an advanced parameter for developers who want to apply a
503
+ // pre-process to the chunk data before sampling into minibatch.
504
+ // Different than the collate function, this policy is applied on the chunk
505
+ // level, instead of minibatch level. When a chunk of data is loaded (multiple
506
+ // chunks if cross_chunk_shuffle_count_ is greater than 1), this policy is
507
+ // applied to the full loaded data. It is useful if developers want to
508
+ // perform pre-processing (like bucketing) to the chunk data before
509
+ // example sampler samples the data. By default it's an empty pointer and no
510
+ // action will be taken.
511
+ std::function<void(UnwrappedBatchType&)> preprocessing_policy_;
512
+
513
+ // Indicates whether the worker threads can be torn down.
514
+ std::atomic<bool> quit_worker_;
515
+
516
+ // keep track of running preloaders to notify batch buffer. A value 0
517
+ // indicates that the chunk loading is completed.
518
+ std::atomic<size_t> running_preloaders_;
519
+
520
+ // mutex to synchronize chunk sampler next() call.
521
+ mutable std::mutex chunk_index_guard_;
522
+
523
+ // boolean value to indicate whether we need to load the checkpoint for
524
+ // chunk_sampler_.
525
+ bool load_checkpoint_;
526
+ };
527
+ } // namespace datasets
528
+ } // namespace data
529
+ } // namespace torch
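To make the pieces above concrete, a hypothetical reader and a direct use of `ChunkDataset` (the `IntReader` type and all values are invented for illustration; real code typically wraps the dataset for use with a `DataLoader`, but the direct form shows the reset()/get_batch() contract):

#include <torch/torch.h>

struct IntReader : torch::data::datasets::ChunkDataReader<int> {
  // ChunkDataset expects the reader type to expose BatchType.
  using BatchType = ChunkType;
  ChunkType read_chunk(size_t chunk_index) override {
    // 100 dummy examples per chunk, all equal to the chunk index.
    return ChunkType(100, static_cast<int>(chunk_index));
  }
  size_t chunk_count() override { return 10; }
  void reset() override {}
};

void drain_all_chunks() {
  torch::data::datasets::ChunkDataset<IntReader> dataset(
      IntReader{},
      torch::data::samplers::RandomSampler(0), // chunk sampler; reset with the real chunk count
      torch::data::samplers::RandomSampler(0), // example sampler; reset per loaded chunk
      torch::data::datasets::ChunkDatasetOptions(
          /*preloader_count=*/2, /*batch_size=*/16));

  dataset.reset(); // required before get_batch(); starts the preloader threads
  while (auto batch = dataset.get_batch()) {
    // *batch is a std::vector<int> with up to 16 examples.
  }
}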
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/data_shuttle.h ADDED
@@ -0,0 +1,87 @@
1
+ #pragma once
2
+
3
+ #include <torch/data/detail/queue.h>
4
+ #include <torch/types.h>
5
+
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/Optional.h>
8
+
9
+ #include <chrono>
10
+ #include <utility>
11
+
12
+ namespace torch {
13
+ namespace data {
14
+ namespace detail {
15
+
16
+ /// Encapsulates the full life cycle of DataLoader jobs.
17
+ ///
18
+ /// When a new job is enqueued to the `DataShuttle`, a counter for in-flight
19
+ /// jobs is bumped. This job is said to be "in-flight" until its result is
20
+ /// popped. Worker threads dequeue jobs as soon as they are available. When a
21
+ /// worker finishes a job, it enqueues the result. Only when the main thread
22
+ /// dequeues a result is the count of in-flight jobs decremented. When the main
23
+ /// thread attempts to dequeue a job but no jobs are in-flight, that means the
24
+ /// epoch is complete and `pop_result` returns an empty optional.
25
+ template <typename Job, typename Result>
26
+ class DataShuttle {
27
+ public:
28
+ /// Pushes a new job. Called by the main thread.
29
+ void push_job(Job job) {
30
+ new_jobs_.push(std::move(job));
31
+ ++in_flight_jobs_;
32
+ }
33
+
34
+ /// Pushes the result of a job. Called by worker threads.
35
+ void push_result(Result result) {
36
+ results_.push(std::move(result));
37
+ }
38
+
39
+ /// Returns the next job, blocking until there is one available. Called by
40
+ /// worker threads.
41
+ Job pop_job() {
42
+ return new_jobs_.pop();
43
+ }
44
+
45
+ /// Returns the result of a job, or nullopt if all jobs were exhausted. Called
46
+ /// by the main thread.
47
+ optional<Result> pop_result(
48
+ optional<std::chrono::milliseconds> timeout = nullopt) {
49
+ if (in_flight_jobs_ > 0) {
50
+ auto result = results_.pop(timeout);
51
+ --in_flight_jobs_;
52
+ return result;
53
+ }
54
+ return nullopt;
55
+ }
56
+
57
+ /// Discards any jobs that are not yet in flight, and waits for all in-flight
58
+ /// jobs to finish, discarding their result.
59
+ void drain() {
60
+ // Clear all inputs so that no further jobs are scheduled.
61
+ auto number_cleared = new_jobs_.clear();
62
+ in_flight_jobs_ -= number_cleared;
63
+ // Remove any outstanding results.
64
+ while (in_flight_jobs_ > 0) {
65
+ pop_result();
66
+ }
67
+ }
68
+
69
+ /// Returns the number of jobs that are still in progress.
70
+ /// When this number is zero, an epoch is finished.
71
+ size_t in_flight_jobs() const noexcept {
72
+ return in_flight_jobs_;
73
+ }
74
+
75
+ private:
76
+ /// The queue for jobs that are not yet in flight.
77
+ Queue<Job> new_jobs_;
78
+ /// The number of in-flight jobs.
79
+ /// NOTE: Not atomic because only manipulated by the main thread.
80
+ size_t in_flight_jobs_ = 0;
81
+ /// The queue for results of finished jobs.
82
+ Queue<Result> results_;
83
+ };
84
+
85
+ } // namespace detail
86
+ } // namespace data
87
+ } // namespace torch
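A tiny sketch of the handshake this class implements (here `Job` and `Result` are plain ints for illustration; in the real DataLoader they are the structs defined in `base.h`):

torch::data::detail::DataShuttle<int, int> shuttle;

// Main thread: schedule work; the in-flight counter is bumped.
shuttle.push_job(7);

// Worker thread: take the job and publish its result.
int job = shuttle.pop_job();
shuttle.push_result(job * 2);

// Main thread: collect the result; the in-flight count drops back to zero.
torch::optional<int> result = shuttle.pop_result(); // contains 14
torch::optional<int> done = shuttle.pop_result();   // nullopt: nothing in flight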
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/queue.h ADDED
@@ -0,0 +1,84 @@
1
+ #pragma once
2
+
3
+ #include <torch/types.h>
4
+
5
+ #include <c10/util/Exception.h>
6
+
7
+ #include <chrono>
8
+ #include <condition_variable>
9
+ #include <cstddef>
10
+ #include <mutex>
11
+ #include <queue>
12
+
13
+ namespace torch {
14
+ namespace data {
15
+ namespace detail {
16
+
17
+ /// A basic locked, blocking MPMC queue.
18
+ ///
19
+ /// Every `push` and `pop` is guarded by a mutex. A condition variable is used
20
+ /// to communicate insertion of new elements, such that waiting threads will be
21
+ /// woken up if they are currently waiting inside a call to `pop()`.
22
+ ///
23
+ /// Note that this data structure is written specifically for use with the
24
+ /// `DataLoader`. Its behavior is tailored to this use case and may not be
25
+ /// applicable to more general uses.
26
+ template <typename T>
27
+ class Queue {
28
+ public:
29
+ /// Pushes a new value to the back of the `Queue` and notifies one thread on
30
+ /// the waiting side about this event.
31
+ void push(T value) {
32
+ {
33
+ std::lock_guard<std::mutex> lock(mutex_);
34
+ queue_.push(std::move(value));
35
+ }
36
+ cv_.notify_one();
37
+ }
38
+
39
+ /// Blocks until at least one element is ready to be popped from the front of
40
+ /// the queue. An optional `timeout` in seconds can be used to limit the time
41
+ /// spent waiting for an element. If the wait times out, an exception is
42
+ /// raised.
43
+ T pop(optional<std::chrono::milliseconds> timeout = nullopt) {
44
+ std::unique_lock<std::mutex> lock(mutex_);
45
+ if (timeout) {
46
+ if (!cv_.wait_for(
47
+ lock, *timeout, [this] { return !this->queue_.empty(); })) {
48
+ // clang-format off
49
+ AT_ERROR(
50
+ "Timeout in DataLoader queue while waiting for next batch"
51
+ " (timeout was ", timeout->count(), " ms)");
52
+ // clang-format on
53
+ }
54
+ } else {
55
+ cv_.wait(lock, [this] { return !this->queue_.empty(); });
56
+ }
57
+ AT_ASSERT(!queue_.empty());
58
+ T value = queue_.front();
59
+ queue_.pop();
60
+ lock.unlock();
61
+ return value;
62
+ }
63
+
64
+ /// Empties the queue and returns the number of elements that were present at
65
+ /// the start of the function. No threads are notified about this event as it
66
+ /// is assumed to be used to drain the queue during shutdown of a
67
+ /// `DataLoader`.
68
+ size_t clear() {
69
+ std::lock_guard<std::mutex> lock(this->mutex_);
70
+ const auto size = queue_.size();
71
+ while (!queue_.empty()) {
72
+ queue_.pop();
73
+ }
74
+ return size;
75
+ }
76
+
77
+ private:
78
+ std::queue<T> queue_;
79
+ std::mutex mutex_;
80
+ std::condition_variable cv_;
81
+ };
82
+ } // namespace detail
83
+ } // namespace data
84
+ } // namespace torch
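For illustration, the blocking and timed behaviour of this queue (the 100 ms value is arbitrary):

torch::data::detail::Queue<int> queue;
queue.push(42);

int value = queue.pop(); // returns 42 immediately; would block if empty

// Bounded wait: if nothing is pushed within 100 ms, pop() raises an
// exception (via AT_ERROR) instead of blocking forever.
try {
  int next = queue.pop(std::chrono::milliseconds(100));
} catch (const c10::Error& e) {
  // timed out
}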
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/sequencers.h ADDED
@@ -0,0 +1,113 @@
1
+ #pragma once
2
+
3
+ #include <torch/types.h>
4
+
5
+ #include <algorithm>
6
+ #include <cstddef>
7
+ #include <vector>
8
+
9
+ namespace torch {
10
+ namespace data {
11
+ namespace detail {
12
+ namespace sequencers {
13
+ namespace detail {
14
+ template <typename Result>
15
+ bool buffer_contains_result(const std::vector<optional<Result>>& buffer) {
16
+ return std::any_of(
17
+ buffer.begin(), buffer.end(), [](const optional<Result>& result) {
18
+ return result.has_value();
19
+ });
20
+ }
21
+ } // namespace detail
22
+
23
+ /// A `Sequencer` accepts a function that yields the next result of a
24
+ /// `DataLoader` and then has the opportunity to influence the order in which
25
+ /// these results are returned. The `NoSequencer` does not enforce any
26
+ /// sequencing and returns any result directly. The `OrderedSequencer` instead
27
+ /// buffers results internally to return them in order of their sequence number.
28
+ template <typename Result>
29
+ struct Sequencer {
30
+ using ResultProducer = std::function<optional<Result>()>;
31
+ virtual ~Sequencer() = default;
32
+ virtual optional<Result> next(ResultProducer next_result) = 0;
33
+ };
34
+
35
+ /// A `Sequencer` that does not enforce any ordering. It is effectively the
36
+ /// identity function.
37
+ template <typename Result>
38
+ struct NoSequencer final : public Sequencer<Result> {
39
+ using typename Sequencer<Result>::ResultProducer;
40
+ optional<Result> next(ResultProducer next_result) override {
41
+ return next_result();
42
+ }
43
+ };
44
+
45
+ /// A `Sequencer` that buffers results and returns them in order of their
46
+ /// sequence number. The `OrderedSequencer` maintains an internal, monotonically
47
+ /// incrementing counter for the next sequence number it expects. If it receives
48
+ /// a result with a higher sequence number, it will buffer it for later (when
49
+ /// the sequence number reaches that of this result). Otherwise, if the sequence
50
+ /// numbers match, the result is returned.
51
+ ///
52
+ /// Implementation note: The `OrderedSequencer` is implemented with a fixed-size
53
+ /// buffer. Let `m` be the maximum number of jobs in the data loader's queue and
54
+ /// `s` be the current sequence number. Assume `m` jobs are scheduled in the
55
+ /// `DataLoader`. Any new result is stored at index `job.sqn mod m` in the
56
+ /// `OrderedSequencer`. Why are we sure sequence numbers of new jobs will not
57
+ /// collide with sequence numbers of buffered jobs? The `OrderedSequencer` will
58
+ /// not return from `next()` until it receives the result with sqn `s`. This
59
+ /// means no new jobs can be scheduled in the `DataLoader` in the meantime,
60
+ /// which enforces that as long as sqn `s` has not been received, `s + m` (which
61
+ /// would cause a collision in the fixed-size buffer) will not yet be scheduled.
62
+ template <typename Result>
63
+ struct OrderedSequencer : public Sequencer<Result> {
64
+ using typename Sequencer<Result>::ResultProducer;
65
+
66
+ /// Constructs the `OrderedSequencer` with the maximum number of results it
67
+ /// will ever hold at one point in time.
68
+ explicit OrderedSequencer(size_t max_jobs) : buffer_(max_jobs) {}
69
+
70
+ /// Buffers results until the next one in the expected order is received.
71
+ optional<Result> next(ResultProducer next_result) override {
72
+ // If we already have the result for the next sqn, return it.
73
+ if (auto& maybe_result = buffer(next_sequence_number_)) {
74
+ auto result = std::move(*maybe_result);
75
+ buffer(next_sequence_number_++).reset();
76
+ return result;
77
+ }
78
+ // Otherwise wait for the next result.
79
+ while (true) {
80
+ auto result = next_result();
81
+ if (!result) {
82
+ AT_ASSERT(!detail::buffer_contains_result(buffer_));
83
+ break;
84
+ }
85
+ // If it was not nullopt and the sequence numbers match, return it
86
+ // directly and bump the sequence number.
87
+ if (result->sequence_number == next_sequence_number_) {
88
+ ++next_sequence_number_;
89
+ return result;
90
+ }
91
+ // Stash the result for later.
92
+ AT_ASSERT(!buffer(result->sequence_number).has_value());
93
+ buffer(result->sequence_number) = std::move(result);
94
+ }
95
+ // The result was an empty optional, so we are done with this epoch.
96
+ return nullopt;
97
+ }
98
+
99
+ /// Accesses the buffer at the `index` modulo the buffer size.
100
+ optional<Result>& buffer(size_t index) {
101
+ return buffer_.at(index % buffer_.size());
102
+ }
103
+
104
+ /// The monotonically increasing sequence number we expect.
105
+ size_t next_sequence_number_ = 0;
106
+
107
+ /// A fixed-size buffer (after construction).
108
+ std::vector<optional<Result>> buffer_;
109
+ };
110
+ } // namespace sequencers
111
+ } // namespace detail
112
+ } // namespace data
113
+ } // namespace torch
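
To make the `OrderedSequencer` invariant concrete, the following standalone sketch (not part of the diff) replays its buffering logic on a hand-written out-of-order stream; the `Result` struct, `max_jobs` value, and payloads are illustrative.

```
// Sketch of the OrderedSequencer idea: results carry a sequence number,
// arrive out of order, and are buffered until the expected number shows up.
#include <cstddef>
#include <iostream>
#include <optional>
#include <vector>

struct Result {
  std::size_t sequence_number;
  int payload;
};

int main() {
  // Results as a worker pool might deliver them: out of order.
  std::vector<Result> arrivals = {{2, 20}, {0, 0}, {1, 10}};
  std::size_t cursor = 0;
  auto next_result = [&]() -> std::optional<Result> {
    if (cursor == arrivals.size()) return std::nullopt;
    return arrivals[cursor++];
  };

  const std::size_t max_jobs = 3;  // size of the fixed reorder buffer
  std::vector<std::optional<Result>> buffer(max_jobs);
  std::size_t next_sequence_number = 0;

  // Yield results in sequence-number order, stashing early arrivals at
  // index (sequence_number mod max_jobs), exactly as described above.
  while (auto result = next_result()) {
    if (result->sequence_number == next_sequence_number) {
      std::cout << "yield " << result->payload << '\n';
      ++next_sequence_number;
      while (buffer[next_sequence_number % max_jobs].has_value()) {
        std::cout << "yield "
                  << buffer[next_sequence_number % max_jobs]->payload << '\n';
        buffer[next_sequence_number % max_jobs].reset();
        ++next_sequence_number;
      }
    } else {
      buffer[result->sequence_number % max_jobs] = *result;
    }
  }
  return 0;  // prints 0, 10, 20 in order
}
```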
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/example.h ADDED
@@ -0,0 +1,55 @@
1
+ #pragma once
2
+
3
+ #include <torch/types.h>
4
+
5
+ namespace torch {
6
+ namespace data {
7
+
8
+ /// An `Example` from a dataset.
9
+ ///
10
+ /// A dataset consists of data and an associated target (label).
11
+ template <typename Data = at::Tensor, typename Target = at::Tensor>
12
+ struct Example {
13
+ using DataType = Data;
14
+ using TargetType = Target;
15
+
16
+ Example() = default;
17
+ Example(Data data, Target target)
18
+ : data(std::move(data)), target(std::move(target)) {}
19
+
20
+ Data data;
21
+ Target target;
22
+ };
23
+
24
+ namespace example {
25
+ using NoTarget = void;
26
+ } // namespace example
27
+
28
+ /// A specialization for `Example` that does not have a target.
29
+ ///
30
+ /// This class exists so that code can be written for a templated `Example`
31
+ /// type, and work both for labeled and unlabeled datasets.
32
+ template <typename Data>
33
+ struct Example<Data, example::NoTarget> {
34
+ using DataType = Data;
35
+ using TargetType = example::NoTarget;
36
+
37
+ Example() = default;
38
+ /* implicit */ Example(Data data) : data(std::move(data)) {}
39
+
40
+ // When a DataLoader returns an Example like this, that example should be
41
+ // implicitly convertible to the underlying data type.
42
+
43
+ operator Data&() {
44
+ return data;
45
+ }
46
+ operator const Data&() const {
47
+ return data;
48
+ }
49
+
50
+ Data data;
51
+ };
52
+
53
+ using TensorExample = Example<at::Tensor, example::NoTarget>;
54
+ } // namespace data
55
+ } // namespace torch
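
A short usage sketch for this header (not part of the diff, and assuming a LibTorch build is available): a labeled `Example<>` is just a data/target pair, while a `TensorExample` converts implicitly to its tensor.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // A labeled example: data plus target.
  torch::data::Example<> labeled(torch::randn({3, 4}), torch::tensor(1));
  std::cout << labeled.data.sizes() << " -> " << labeled.target.item<int>() << '\n';

  // An unlabeled example converts implicitly to the underlying tensor,
  // via the conversion operators defined above.
  torch::data::TensorExample unlabeled(torch::ones({2, 2}));
  torch::Tensor& as_tensor = unlabeled;
  std::cout << as_tensor.sum().item<float>() << '\n';  // prints 4
  return 0;
}
```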
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/iterator.h ADDED
@@ -0,0 +1,178 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/utils/variadic.h>
4
+ #include <torch/types.h>
5
+
6
+ #include <c10/util/Exception.h>
7
+
8
+ #include <functional>
9
+ #include <iterator>
10
+ #include <memory>
11
+ #include <type_traits>
12
+ #include <utility>
13
+
14
+ namespace torch {
15
+ namespace data {
16
+ namespace detail {
17
+ // For increased safety and more separated logic, this implementation of
18
+ // `Iterator` consists of a `ValidIterator` and a `SentinelIterator`. A
19
+ // `ValidIterator` yields new batches until the `DataLoader` is exhausted. While
20
+ // the `DataLoader` is not exhausted, `ValidIterator`s compare equal if they are
21
+ // the same object. When the `ValidIterator` becomes exhausted, it compares
22
+ // equal to the `SentinelIterator`, but not before. Half the code here is to
23
+ // implement double dispatch for the comparison. Got damnit, C++.
24
+
25
+ template <typename Batch>
26
+ struct ValidIterator;
27
+
28
+ template <typename Batch>
29
+ struct SentinelIterator;
30
+
31
+ /// Base class for the `ValidIterator` and `SentinelIterator`
32
+ template <typename Batch>
33
+ struct IteratorImpl {
34
+ virtual ~IteratorImpl() = default;
35
+ virtual void next() = 0;
36
+ virtual Batch& get() = 0;
37
+ virtual bool operator==(const IteratorImpl& other) const = 0;
38
+ virtual bool operator==(const ValidIterator<Batch>& other) const = 0;
39
+ virtual bool operator==(const SentinelIterator<Batch>& other) const = 0;
40
+ };
41
+
42
+ template <typename Batch>
43
+ struct ValidIterator : public IteratorImpl<Batch> {
44
+ using BatchProducer = std::function<optional<Batch>()>;
45
+
46
+ explicit ValidIterator(BatchProducer next_batch)
47
+ : next_batch_(std::move(next_batch)) {}
48
+
49
+ /// Fetches the next batch.
50
+ void next() override {
51
+ // If we didn't get the very first batch yet, get it now.
52
+ lazy_initialize();
53
+ TORCH_CHECK(
54
+ batch_.has_value(), "Attempted to increment iterator past the end");
55
+ // Increment to the next batch.
56
+ batch_ = next_batch_();
57
+ }
58
+
59
+ /// Returns the current batch. The precondition for this operation to not
60
+ /// throw an exception is that it has been compared to the `SentinelIterator`
61
+ /// and did not compare equal.
62
+ Batch& get() override {
63
+ // If we didn't get the very first batch yet, get it now.
64
+ lazy_initialize();
65
+ TORCH_CHECK(
66
+ batch_.has_value(),
67
+ "Attempted to dereference iterator that was past the end");
68
+ return batch_.value();
69
+ }
70
+
71
+ /// Does double dispatch.
72
+ bool operator==(const IteratorImpl<Batch>& other) const override {
73
+ return other == *this;
74
+ }
75
+
76
+ /// A `ValidIterator` is equal to the `SentinelIterator` iff. the
77
+ /// `ValidIterator` has reached the end of the dataloader.
78
+ bool operator==(const SentinelIterator<Batch>& /* unused */) const override {
79
+ lazy_initialize();
80
+ return !batch_;
81
+ }
82
+
83
+ /// Returns true if the memory address of `other` equals that of `this`.
84
+ bool operator==(const ValidIterator<Batch>& other) const override {
85
+ return &other == this;
86
+ }
87
+
88
+ /// Gets the very first batch if it has not yet been fetched.
89
+ void lazy_initialize() const {
90
+ if (!initialized_) {
91
+ batch_ = next_batch_();
92
+ initialized_ = true;
93
+ }
94
+ }
95
+
96
+ BatchProducer next_batch_;
97
+ mutable optional<Batch> batch_;
98
+ mutable bool initialized_ = false;
99
+ };
100
+
101
+ template <typename Batch>
102
+ struct SentinelIterator : public IteratorImpl<Batch> {
103
+ void next() override {
104
+ AT_ERROR(
105
+ "Incrementing the DataLoader's past-the-end iterator is not allowed");
106
+ }
107
+
108
+ Batch& get() override {
109
+ AT_ERROR(
110
+ "Dereferencing the DataLoader's past-the-end iterator is not allowed");
111
+ }
112
+
113
+ /// Does double dispatch.
114
+ bool operator==(const IteratorImpl<Batch>& other) const override {
115
+ return other == *this;
116
+ }
117
+
118
+ /// Calls the comparison operator between `ValidIterator` and
119
+ /// `SentinelIterator`.
120
+ bool operator==(const ValidIterator<Batch>& other) const override {
121
+ return other == *this;
122
+ }
123
+
124
+ /// Sentinel iterators always compare equal.
125
+ bool operator==(const SentinelIterator<Batch>& other) const override {
126
+ return true;
127
+ }
128
+ };
129
+ } // namespace detail
130
+
131
+ template <typename Batch>
132
+ class Iterator {
133
+ public:
134
+ // Type aliases to make the class recognized as a proper iterator.
135
+ using difference_type = std::ptrdiff_t;
136
+ using value_type = Batch;
137
+ using pointer = Batch*;
138
+ using reference = Batch&;
139
+ using iterator_category = std::input_iterator_tag;
140
+
141
+ explicit Iterator(std::unique_ptr<detail::IteratorImpl<Batch>> impl)
142
+ : impl_(std::move(impl)) {}
143
+
144
+ /// Increments the iterator.
145
+ /// Only permitted for valid iterators (not past the end).
146
+ Iterator& operator++() {
147
+ impl_->next();
148
+ return *this;
149
+ }
150
+
151
+ /// Returns the current batch.
152
+ /// Only permitted for valid iterators (not past the end).
153
+ Batch& operator*() {
154
+ return impl_->get();
155
+ }
156
+
157
+ /// Returns a pointer to the current batch.
158
+ /// Only permitted for valid iterators (not past the end).
159
+ Batch* operator->() {
160
+ return &impl_->get();
161
+ }
162
+
163
+ /// Compares two iterators for equality.
164
+ bool operator==(const Iterator& other) const {
165
+ return *impl_ == *other.impl_;
166
+ }
167
+
168
+ /// Compares two iterators for inequality.
169
+ bool operator!=(const Iterator& other) const {
170
+ return !(*this == other);
171
+ }
172
+
173
+ private:
174
+ /// Points either to a `ValidIterator` or to a `SentinelIterator`.
175
+ std::shared_ptr<detail::IteratorImpl<Batch>> impl_;
176
+ };
177
+ } // namespace data
178
+ } // namespace torch
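
The essential contract here is that a "valid" iterator compares equal to the past-the-end iterator exactly when its producer is exhausted. The standalone sketch below (not part of the diff) shows that contract with a separate sentinel type; the real header needs the double-dispatch machinery because both ends of the range share the single `Iterator<Batch>` type, and it also fetches the first batch lazily rather than eagerly as done here.

```
#include <functional>
#include <iostream>
#include <optional>

struct EndSentinel {};

class BatchIterator {
 public:
  explicit BatchIterator(std::function<std::optional<int>()> next_batch)
      : next_batch_(std::move(next_batch)), batch_(next_batch_()) {}

  int operator*() const { return *batch_; }
  BatchIterator& operator++() {
    batch_ = next_batch_();
    return *this;
  }
  // Equal to the sentinel once the producer returns nullopt.
  bool operator==(const EndSentinel&) const { return !batch_.has_value(); }
  bool operator!=(const EndSentinel& end) const { return !(*this == end); }

 private:
  std::function<std::optional<int>()> next_batch_;
  std::optional<int> batch_;
};

int main() {
  int remaining = 3;
  auto producer = [&]() -> std::optional<int> {
    return remaining > 0 ? std::optional<int>(remaining--) : std::nullopt;
  };
  for (BatchIterator it(producer); it != EndSentinel{}; ++it) {
    std::cout << *it << '\n';  // prints 3, 2, 1
  }
  return 0;
}
```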
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <torch/data/samplers/base.h>
4
+ #include <torch/data/samplers/custom_batch_request.h>
5
+ #include <torch/data/samplers/distributed.h>
6
+ #include <torch/data/samplers/random.h>
7
+ #include <torch/data/samplers/sequential.h>
8
+ #include <torch/data/samplers/serialize.h>
9
+ #include <torch/data/samplers/stream.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/base.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/types.h>
5
+
6
+ #include <cstddef>
7
+ #include <mutex>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ namespace serialize {
12
+ class OutputArchive;
13
+ class InputArchive;
14
+ } // namespace serialize
15
+ } // namespace torch
16
+
17
+ namespace torch {
18
+ namespace data {
19
+ namespace samplers {
20
+ /// A `Sampler` is an object that yields an index with which to access a
21
+ /// dataset.
22
+ template <typename BatchRequest = std::vector<size_t>>
23
+ class Sampler {
24
+ public:
25
+ using BatchRequestType = BatchRequest;
26
+
27
+ virtual ~Sampler() = default;
28
+
29
+ /// Resets the `Sampler`'s internal state.
30
+ /// Typically called before a new epoch.
31
+ /// Optionally, accepts a new size when resetting the sampler.
32
+ virtual void reset(optional<size_t> new_size) = 0;
33
+
34
+ /// Returns the next index if possible, or an empty optional if the
35
+ /// sampler is exhausted for this epoch.
36
+ virtual optional<BatchRequest> next(size_t batch_size) = 0;
37
+
38
+ /// Serializes the `Sampler` to the `archive`.
39
+ virtual void save(serialize::OutputArchive& archive) const = 0;
40
+
41
+ /// Deserializes the `Sampler` from the `archive`.
42
+ virtual void load(serialize::InputArchive& archive) = 0;
43
+ };
44
+
45
+ } // namespace samplers
46
+ } // namespace data
47
+ } // namespace torch
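
For reference, a custom sampler only needs the four virtual methods above. The sketch below (not part of the diff, assumes LibTorch; the class name and the no-op serialization are illustrative) hands out sequential indices in fixed-size batches.

```
#include <torch/torch.h>
#include <algorithm>
#include <vector>

class ToySequentialSampler
    : public torch::data::samplers::Sampler<std::vector<size_t>> {
 public:
  explicit ToySequentialSampler(size_t size) : size_(size) {}

  void reset(torch::optional<size_t> new_size) override {
    if (new_size.has_value()) {
      size_ = *new_size;
    }
    index_ = 0;
  }

  torch::optional<std::vector<size_t>> next(size_t batch_size) override {
    if (index_ >= size_) {
      return torch::nullopt;  // epoch exhausted
    }
    const size_t end = std::min(size_, index_ + batch_size);
    std::vector<size_t> indices;
    for (size_t i = index_; i < end; ++i) {
      indices.push_back(i);
    }
    index_ = end;
    return indices;
  }

  // Serialization is deliberately a no-op in this sketch.
  void save(torch::serialize::OutputArchive& /*archive*/) const override {}
  void load(torch::serialize::InputArchive& /*archive*/) override {}

 private:
  size_t size_;
  size_t index_ = 0;
};
```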
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ #include <torch/data/transforms/base.h>
4
+ #include <torch/data/transforms/collate.h>
5
+ #include <torch/data/transforms/lambda.h>
6
+ #include <torch/data/transforms/stack.h>
7
+ #include <torch/data/transforms/tensor.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/base.h ADDED
@@ -0,0 +1,53 @@
1
+ #pragma once
2
+
3
+ #include <torch/types.h>
4
+
5
+ #include <utility>
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace data {
10
+ namespace transforms {
11
+
12
+ /// A transformation of a batch to a new batch.
13
+ template <typename InputBatch, typename OutputBatch>
14
+ class BatchTransform {
15
+ public:
16
+ using InputBatchType = InputBatch;
17
+ using OutputBatchType = OutputBatch;
18
+
19
+ virtual ~BatchTransform() = default;
20
+
21
+ /// Applies the transformation to the given `input_batch`.
22
+ virtual OutputBatch apply_batch(InputBatch input_batch) = 0;
23
+ };
24
+
25
+ /// A transformation of individual input examples to individual output examples.
26
+ ///
27
+ /// Just like a `Dataset` is a `BatchDataset`, a `Transform` is a
28
+ /// `BatchTransform` that can operate on the level of individual examples rather
29
+ /// than entire batches. The batch-level transform is implemented (by default)
30
+ /// in terms of the example-level transform, though this can be customized.
31
+ template <typename Input, typename Output>
32
+ class Transform
33
+ : public BatchTransform<std::vector<Input>, std::vector<Output>> {
34
+ public:
35
+ using InputType = Input;
36
+ using OutputType = Output;
37
+
38
+ /// Applies the transformation to the given `input`.
39
+ virtual OutputType apply(InputType input) = 0;
40
+
41
+ /// Applies the `transformation` over the entire `input_batch`.
42
+ std::vector<Output> apply_batch(std::vector<Input> input_batch) override {
43
+ std::vector<Output> output_batch;
44
+ output_batch.reserve(input_batch.size());
45
+ for (auto&& input : input_batch) {
46
+ output_batch.push_back(apply(std::move(input)));
47
+ }
48
+ return output_batch;
49
+ }
50
+ };
51
+ } // namespace transforms
52
+ } // namespace data
53
+ } // namespace torch
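
The key point of `Transform` is that only `apply` needs to be written; `apply_batch` falls out of the default loop above. A small sketch (not part of the diff, assumes LibTorch; `Scale` is an illustrative name):

```
#include <torch/torch.h>
#include <iostream>
#include <vector>

// An example-level transform: scales every tensor by a constant factor.
struct Scale : torch::data::transforms::Transform<torch::Tensor, torch::Tensor> {
  explicit Scale(double factor) : factor(factor) {}
  torch::Tensor apply(torch::Tensor input) override {
    return input * factor;
  }
  double factor;
};

int main() {
  Scale scale(2.0);
  std::vector<torch::Tensor> batch = {torch::ones({2}), torch::ones({2})};
  // apply_batch is inherited: it simply maps apply over the batch.
  auto scaled = scale.apply_batch(std::move(batch));
  std::cout << scaled.at(0) << '\n';  // every element is now 2
  return 0;
}
```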
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/collate.h ADDED
@@ -0,0 +1,35 @@
1
+ #pragma once
2
+
3
+ #include <torch/data/example.h>
4
+ #include <torch/data/transforms/lambda.h>
5
+
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace data {
10
+ namespace transforms {
11
+
12
+ /// A `Collation` is a transform that reduces a batch into a single value.
13
+ /// The result is a `BatchDataset` that has the type of the single value as its
14
+ /// `BatchType`.
15
+ template <typename T, typename BatchType = std::vector<T>>
16
+ using Collation = BatchTransform<BatchType, T>;
17
+
18
+ /// A `Collate` allows passing a custom function to reduce/collate a batch
19
+ /// into a single value. It's effectively the lambda version of `Collation`,
20
+ /// which you could subclass and override `operator()` to achieve the same.
21
+ ///
22
+ /// \rst
23
+ /// .. code-block:: cpp
24
+ /// using namespace torch::data;
25
+ ///
26
+ /// auto dataset = datasets::MNIST("path/to/mnist")
27
+ /// .map(transforms::Collate<Example<>>([](std::vector<Example<>> e) {
28
+ /// return std::move(e.front());
29
+ /// }));
30
+ /// \endrst
31
+ template <typename T, typename BatchType = std::vector<T>>
32
+ using Collate = BatchLambda<BatchType, T>;
33
+ } // namespace transforms
34
+ } // namespace data
35
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/lambda.h ADDED
@@ -0,0 +1,56 @@
1
+ #pragma once
2
+
3
+ #include <torch/data/transforms/base.h>
4
+
5
+ #include <functional>
6
+ #include <utility>
7
+ #include <vector>
8
+
9
+ namespace torch {
10
+ namespace data {
11
+ namespace transforms {
12
+
13
+ /// A `BatchTransform` that applies a user-provided functor to a batch.
14
+ template <typename Input, typename Output = Input>
15
+ class BatchLambda : public BatchTransform<Input, Output> {
16
+ public:
17
+ using typename BatchTransform<Input, Output>::InputBatchType;
18
+ using typename BatchTransform<Input, Output>::OutputBatchType;
19
+ using FunctionType = std::function<OutputBatchType(InputBatchType)>;
20
+
21
+ /// Constructs the `BatchLambda` from the given `function` object.
22
+ explicit BatchLambda(FunctionType function)
23
+ : function_(std::move(function)) {}
24
+
25
+ /// Applies the user-provided function object to the `input_batch`.
26
+ OutputBatchType apply_batch(InputBatchType input_batch) override {
27
+ return function_(std::move(input_batch));
28
+ }
29
+
30
+ private:
31
+ FunctionType function_;
32
+ };
33
+
34
+ // A `Transform` that applies a user-provided functor to individual examples.
35
+ template <typename Input, typename Output = Input>
36
+ class Lambda : public Transform<Input, Output> {
37
+ public:
38
+ using typename Transform<Input, Output>::InputType;
39
+ using typename Transform<Input, Output>::OutputType;
40
+ using FunctionType = std::function<Output(Input)>;
41
+
42
+ /// Constructs the `Lambda` from the given `function` object.
43
+ explicit Lambda(FunctionType function) : function_(std::move(function)) {}
44
+
45
+ /// Applies the user-provided function object to the `input`.
46
+ OutputType apply(InputType input) override {
47
+ return function_(std::move(input));
48
+ }
49
+
50
+ private:
51
+ FunctionType function_;
52
+ };
53
+
54
+ } // namespace transforms
55
+ } // namespace data
56
+ } // namespace torch
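
A quick contrast of the two wrappers above (not part of the diff; plain `int`s are used since both templates accept arbitrary types): `Lambda` is applied per example, while `BatchLambda` sees the whole batch and may change its type.

```
#include <torch/torch.h>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  namespace transforms = torch::data::transforms;

  // Per-example functor; apply_batch maps it over the batch.
  transforms::Lambda<int, int> add_one([](int x) { return x + 1; });
  auto bumped = add_one.apply_batch({1, 2, 3});  // {2, 3, 4}

  // Whole-batch functor; here it reduces the batch to a single int.
  transforms::BatchLambda<std::vector<int>, int> sum(
      [](std::vector<int> batch) {
        return std::accumulate(batch.begin(), batch.end(), 0);
      });
  std::cout << bumped.at(2) << " " << sum.apply_batch({1, 2, 3}) << '\n';  // 4 6
  return 0;
}
```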
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/stack.h ADDED
@@ -0,0 +1,49 @@
1
+ #pragma once
2
+
3
+ #include <torch/data/example.h>
4
+ #include <torch/data/transforms/collate.h>
5
+ #include <torch/types.h>
6
+
7
+ #include <utility>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ namespace data {
12
+ namespace transforms {
13
+
14
+ template <typename T = Example<>>
15
+ struct Stack;
16
+
17
+ /// A `Collation` for `Example<Tensor, Tensor>` types that stacks all data
18
+ /// tensors into one tensor, and all target (label) tensors into one tensor.
19
+ template <>
20
+ struct Stack<Example<>> : public Collation<Example<>> {
21
+ Example<> apply_batch(std::vector<Example<>> examples) override {
22
+ std::vector<torch::Tensor> data, targets;
23
+ data.reserve(examples.size());
24
+ targets.reserve(examples.size());
25
+ for (auto& example : examples) {
26
+ data.push_back(std::move(example.data));
27
+ targets.push_back(std::move(example.target));
28
+ }
29
+ return {torch::stack(data), torch::stack(targets)};
30
+ }
31
+ };
32
+
33
+ /// A `Collation` for `Example<Tensor, NoTarget>` types that stacks all data
34
+ /// tensors into one tensor.
35
+ template <>
36
+ struct Stack<TensorExample>
37
+ : public Collation<Example<Tensor, example::NoTarget>> {
38
+ TensorExample apply_batch(std::vector<TensorExample> examples) override {
39
+ std::vector<torch::Tensor> data;
40
+ data.reserve(examples.size());
41
+ for (auto& example : examples) {
42
+ data.push_back(std::move(example.data));
43
+ }
44
+ return torch::stack(data);
45
+ }
46
+ };
47
+ } // namespace transforms
48
+ } // namespace data
49
+ } // namespace torch
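
`Stack` is normally attached to a dataset via `.map(...)`, but it can also be called directly, which makes the collation easy to see. A sketch (not part of the diff, assumes LibTorch; shapes are illustrative):

```
#include <torch/torch.h>
#include <iostream>
#include <vector>

int main() {
  using torch::data::Example;

  std::vector<Example<>> batch;
  for (int label = 0; label < 4; ++label) {
    batch.emplace_back(torch::randn({3, 8, 8}), torch::tensor(label));
  }

  torch::data::transforms::Stack<Example<>> stack;
  Example<> collated = stack.apply_batch(std::move(batch));
  std::cout << collated.data.sizes() << '\n';    // [4, 3, 8, 8]
  std::cout << collated.target.sizes() << '\n';  // targets stacked along a new dim
  return 0;
}
```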
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/tensor.h ADDED
@@ -0,0 +1,77 @@
1
+ #pragma once
2
+
3
+ #include <torch/data/example.h>
4
+ #include <torch/data/transforms/base.h>
5
+ #include <torch/types.h>
6
+
7
+ #include <functional>
8
+ #include <utility>
9
+
10
+ namespace torch {
11
+ namespace data {
12
+ namespace transforms {
13
+
14
+ /// A `Transform` that is specialized for the typical `Example<Tensor, Tensor>`
15
+ /// combination. It exposes a single `operator()` interface hook (for
16
+ /// subclasses), and calls this function on input `Example` objects.
17
+ template <typename Target = Tensor>
18
+ class TensorTransform
19
+ : public Transform<Example<Tensor, Target>, Example<Tensor, Target>> {
20
+ public:
21
+ using E = Example<Tensor, Target>;
22
+ using typename Transform<E, E>::InputType;
23
+ using typename Transform<E, E>::OutputType;
24
+
25
+ /// Transforms a single input tensor to an output tensor.
26
+ virtual Tensor operator()(Tensor input) = 0;
27
+
28
+ /// Implementation of `Transform::apply` that calls `operator()`.
29
+ OutputType apply(InputType input) override {
30
+ input.data = (*this)(std::move(input.data));
31
+ return input;
32
+ }
33
+ };
34
+
35
+ /// A `Lambda` specialized for the typical `Example<Tensor, Tensor>` input type.
36
+ template <typename Target = Tensor>
37
+ class TensorLambda : public TensorTransform<Target> {
38
+ public:
39
+ using FunctionType = std::function<Tensor(Tensor)>;
40
+
41
+ /// Creates a `TensorLambda` from the given `function`.
42
+ explicit TensorLambda(FunctionType function)
43
+ : function_(std::move(function)) {}
44
+
45
+ /// Applies the user-provided functor to the input tensor.
46
+ Tensor operator()(Tensor input) override {
47
+ return function_(std::move(input));
48
+ }
49
+
50
+ private:
51
+ FunctionType function_;
52
+ };
53
+
54
+ /// Normalizes input tensors by subtracting the supplied mean and dividing by
55
+ /// the given standard deviation.
56
+ template <typename Target = Tensor>
57
+ struct Normalize : public TensorTransform<Target> {
58
+ /// Constructs a `Normalize` transform. The mean and standard deviation can be
59
+ /// anything that is broadcastable over the input tensors (like single
60
+ /// scalars).
61
+ Normalize(ArrayRef<double> mean, ArrayRef<double> stddev)
62
+ : mean(torch::tensor(mean, torch::kFloat32)
63
+ .unsqueeze(/*dim=*/1)
64
+ .unsqueeze(/*dim=*/2)),
65
+ stddev(torch::tensor(stddev, torch::kFloat32)
66
+ .unsqueeze(/*dim=*/1)
67
+ .unsqueeze(/*dim=*/2)) {}
68
+
69
+ torch::Tensor operator()(Tensor input) override {
70
+ return input.sub(mean).div(stddev);
71
+ }
72
+
73
+ torch::Tensor mean, stddev;
74
+ };
75
+ } // namespace transforms
76
+ } // namespace data
77
+ } // namespace torch
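
`Normalize` is both a plain callable on tensors and, through `TensorTransform::apply`, a transform over `Example<>` objects. A sketch (not part of the diff, assumes LibTorch; the mean/stddev values are illustrative):

```
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::data::transforms::Normalize<> normalize(
      /*mean=*/{0.5}, /*stddev=*/{0.25});

  // Called directly on a (C, H, W) tensor...
  torch::Tensor image = torch::full({1, 4, 4}, 0.75);
  torch::Tensor out = normalize(image);
  std::cout << out.mean().item<float>() << '\n';  // (0.75 - 0.5) / 0.25 = 1

  // ...or through the Transform interface on an Example<>.
  auto example = normalize.apply({image, torch::tensor(0)});
  std::cout << example.data.mean().item<float>() << '\n';
  return 0;
}
```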
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/worker_exception.h ADDED
@@ -0,0 +1,38 @@
1
+ #pragma once
2
+
3
+ #include <exception>
4
+ #include <string>
5
+ #include <utility>
6
+
7
+ namespace torch {
8
+ namespace data {
9
+
10
+ /// An exception thrown when a DataLoader's worker thread throws an exception,
11
+ /// which is caught. A `WorkerException` stores an `exception_ptr` to the
12
+ /// original exception thrown in the worker thread.
13
+ struct WorkerException : public std::exception {
14
+ /// Constructs a `WorkerException` from an `exception_ptr`.
15
+ explicit WorkerException(std::exception_ptr original)
16
+ : original_exception(std::move(original)),
17
+ message("Caught exception in DataLoader worker thread.") {
18
+ try {
19
+ std::rethrow_exception(original_exception);
20
+ } catch (std::exception& e) {
21
+ message += " Original message: ";
22
+ message += e.what();
23
+ }
24
+ }
25
+
26
+ const char* what() const noexcept override {
27
+ return message.c_str();
28
+ }
29
+
30
+ /// The original exception thrown in the worker thread.
31
+ std::exception_ptr original_exception;
32
+
33
+ /// This exception's message (not the original exception's message).
34
+ std::string message;
35
+ };
36
+
37
+ } // namespace data
38
+ } // namespace torch
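
A small sketch of how the wrapper behaves (not part of the diff, assumes LibTorch; the inner error message is illustrative):

```
#include <torch/torch.h>
#include <iostream>
#include <stdexcept>

int main() {
  try {
    try {
      throw std::runtime_error("bad sample at index 7");
    } catch (...) {
      // Wrap whatever was thrown, in the spirit of what the worker code does.
      throw torch::data::WorkerException(std::current_exception());
    }
  } catch (const torch::data::WorkerException& e) {
    // Prints the wrapper's message followed by the original message.
    std::cout << e.what() << '\n';
  }
  return 0;
}
```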
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/conv.h ADDED
@@ -0,0 +1,301 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/options/conv.h>
4
+ #include <torch/types.h>
5
+
6
+ namespace torch {
7
+ namespace nn {
8
+ namespace functional {
9
+
10
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
11
+ namespace detail {
12
+
13
+ inline std::string padding_unwrap(enumtype::kValid) {
14
+ return "valid";
15
+ }
16
+
17
+ inline std::string padding_unwrap(enumtype::kSame) {
18
+ return "same";
19
+ }
20
+
21
+ template <size_t D>
22
+ IntArrayRef padding_unwrap(const ExpandingArray<D>& array) {
23
+ return array;
24
+ }
25
+
26
+ inline Tensor conv1d(
27
+ const Tensor& input,
28
+ const Tensor& weight,
29
+ const Tensor& bias,
30
+ ExpandingArray<1> stride,
31
+ const Conv1dFuncOptions::padding_t& padding,
32
+ ExpandingArray<1> dilation,
33
+ int64_t groups) {
34
+ return std::visit(
35
+ [&](const auto& pad) {
36
+ return torch::conv1d(
37
+ input, weight, bias, stride, padding_unwrap(pad), dilation, groups);
38
+ },
39
+ padding);
40
+ }
41
+ } // namespace detail
42
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
43
+
44
+ /// See
45
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv1d
46
+ /// about the exact behavior of this functional.
47
+ ///
48
+ /// See the documentation for `torch::nn::functional::Conv1dFuncOptions` class
49
+ /// to learn what optional arguments are supported for this functional.
50
+ ///
51
+ /// Example:
52
+ /// ```
53
+ /// namespace F = torch::nn::functional;
54
+ /// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
55
+ /// ```
56
+ inline Tensor conv1d(
57
+ const Tensor& input,
58
+ const Tensor& weight,
59
+ const Conv1dFuncOptions& options = {}) {
60
+ return detail::conv1d(
61
+ input,
62
+ weight,
63
+ options.bias(),
64
+ options.stride(),
65
+ options.padding(),
66
+ options.dilation(),
67
+ options.groups());
68
+ }
69
+
70
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
71
+ namespace detail {
72
+ inline Tensor conv2d(
73
+ const Tensor& input,
74
+ const Tensor& weight,
75
+ const Tensor& bias,
76
+ ExpandingArray<2> stride,
77
+ const Conv2dFuncOptions::padding_t& padding,
78
+ ExpandingArray<2> dilation,
79
+ int64_t groups) {
80
+ return std::visit(
81
+ [&](const auto& pad) {
82
+ return torch::conv2d(
83
+ input, weight, bias, stride, padding_unwrap(pad), dilation, groups);
84
+ },
85
+ padding);
86
+ }
87
+ } // namespace detail
88
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
89
+
90
+ /// See
91
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv2d
92
+ /// about the exact behavior of this functional.
93
+ ///
94
+ /// See the documentation for `torch::nn::functional::Conv2dFuncOptions` class
95
+ /// to learn what optional arguments are supported for this functional.
96
+ ///
97
+ /// Example:
98
+ /// ```
99
+ /// namespace F = torch::nn::functional;
100
+ /// F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1));
101
+ /// ```
102
+ inline Tensor conv2d(
103
+ const Tensor& input,
104
+ const Tensor& weight,
105
+ const Conv2dFuncOptions& options = {}) {
106
+ return detail::conv2d(
107
+ input,
108
+ weight,
109
+ options.bias(),
110
+ options.stride(),
111
+ options.padding(),
112
+ options.dilation(),
113
+ options.groups());
114
+ }
115
+
116
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
117
+ namespace detail {
118
+ inline Tensor conv3d(
119
+ const Tensor& input,
120
+ const Tensor& weight,
121
+ const Tensor& bias,
122
+ ExpandingArray<3> stride,
123
+ const Conv3dFuncOptions::padding_t& padding,
124
+ ExpandingArray<3> dilation,
125
+ int64_t groups) {
126
+ return std::visit(
127
+ [&](const auto& pad) {
128
+ return torch::conv3d(
129
+ input, weight, bias, stride, padding_unwrap(pad), dilation, groups);
130
+ },
131
+ padding);
132
+ }
133
+ } // namespace detail
134
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
135
+
136
+ /// See
137
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv3d
138
+ /// about the exact behavior of this functional.
139
+ ///
140
+ /// See the documentation for `torch::nn::functional::Conv3dFuncOptions` class
141
+ /// to learn what optional arguments are supported for this functional.
142
+ ///
143
+ /// Example:
144
+ /// ```
145
+ /// namespace F = torch::nn::functional;
146
+ /// F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1));
147
+ /// ```
148
+ inline Tensor conv3d(
149
+ const Tensor& input,
150
+ const Tensor& weight,
151
+ const Conv3dFuncOptions& options = {}) {
152
+ return detail::conv3d(
153
+ input,
154
+ weight,
155
+ options.bias(),
156
+ options.stride(),
157
+ options.padding(),
158
+ options.dilation(),
159
+ options.groups());
160
+ }
161
+
162
+ // ============================================================================
163
+
164
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
165
+ namespace detail {
166
+ inline Tensor conv_transpose1d(
167
+ const Tensor& input,
168
+ const Tensor& weight,
169
+ const Tensor& bias,
170
+ IntArrayRef stride,
171
+ IntArrayRef padding,
172
+ IntArrayRef output_padding,
173
+ int64_t groups,
174
+ IntArrayRef dilation) {
175
+ return torch::conv_transpose1d(
176
+ input, weight, bias, stride, padding, output_padding, groups, dilation);
177
+ }
178
+ } // namespace detail
179
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
180
+
181
+ /// See
182
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose1d
183
+ /// about the exact behavior of this functional.
184
+ ///
185
+ /// See the documentation for
186
+ /// `torch::nn::functional::ConvTranspose1dFuncOptions` class to learn what
187
+ /// optional arguments are supported for this functional.
188
+ ///
189
+ /// Example:
190
+ /// ```
191
+ /// namespace F = torch::nn::functional;
192
+ /// F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1));
193
+ /// ```
194
+ inline Tensor conv_transpose1d(
195
+ const Tensor& input,
196
+ const Tensor& weight,
197
+ const ConvTranspose1dFuncOptions& options = {}) {
198
+ return detail::conv_transpose1d(
199
+ input,
200
+ weight,
201
+ options.bias(),
202
+ options.stride(),
203
+ options.padding(),
204
+ options.output_padding(),
205
+ options.groups(),
206
+ options.dilation());
207
+ }
208
+
209
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
210
+ namespace detail {
211
+ inline Tensor conv_transpose2d(
212
+ const Tensor& input,
213
+ const Tensor& weight,
214
+ const Tensor& bias,
215
+ IntArrayRef stride,
216
+ IntArrayRef padding,
217
+ IntArrayRef output_padding,
218
+ int64_t groups,
219
+ IntArrayRef dilation) {
220
+ return torch::conv_transpose2d(
221
+ input, weight, bias, stride, padding, output_padding, groups, dilation);
222
+ }
223
+ } // namespace detail
224
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
225
+
226
+ /// See
227
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose2d
228
+ /// about the exact behavior of this functional.
229
+ ///
230
+ /// See the documentation for
231
+ /// `torch::nn::functional::ConvTranspose2dFuncOptions` class to learn what
232
+ /// optional arguments are supported for this functional.
233
+ ///
234
+ /// Example:
235
+ /// ```
236
+ /// namespace F = torch::nn::functional;
237
+ /// F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1));
238
+ /// ```
239
+ inline Tensor conv_transpose2d(
240
+ const Tensor& input,
241
+ const Tensor& weight,
242
+ const ConvTranspose2dFuncOptions& options = {}) {
243
+ return detail::conv_transpose2d(
244
+ input,
245
+ weight,
246
+ options.bias(),
247
+ options.stride(),
248
+ options.padding(),
249
+ options.output_padding(),
250
+ options.groups(),
251
+ options.dilation());
252
+ }
253
+
254
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
255
+ namespace detail {
256
+ inline Tensor conv_transpose3d(
257
+ const Tensor& input,
258
+ const Tensor& weight,
259
+ const Tensor& bias,
260
+ IntArrayRef stride,
261
+ IntArrayRef padding,
262
+ IntArrayRef output_padding,
263
+ int64_t groups,
264
+ IntArrayRef dilation) {
265
+ return torch::conv_transpose3d(
266
+ input, weight, bias, stride, padding, output_padding, groups, dilation);
267
+ }
268
+ } // namespace detail
269
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
270
+
271
+ /// See
272
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose3d
273
+ /// about the exact behavior of this functional.
274
+ ///
275
+ /// See the documentation for
276
+ /// `torch::nn::functional::ConvTranspose3dFuncOptions` class to learn what
277
+ /// optional arguments are supported for this functional.
278
+ ///
279
+ /// Example:
280
+ /// ```
281
+ /// namespace F = torch::nn::functional;
282
+ /// F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1));
283
+ /// ```
284
+ inline Tensor conv_transpose3d(
285
+ const Tensor& input,
286
+ const Tensor& weight,
287
+ const ConvTranspose3dFuncOptions& options = {}) {
288
+ return detail::conv_transpose3d(
289
+ input,
290
+ weight,
291
+ options.bias(),
292
+ options.stride(),
293
+ options.padding(),
294
+ options.output_padding(),
295
+ options.groups(),
296
+ options.dilation());
297
+ }
298
+
299
+ } // namespace functional
300
+ } // namespace nn
301
+ } // namespace torch
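
A usage sketch with concrete shapes (not part of the diff, assumes LibTorch; the option values are illustrative):

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;

  torch::Tensor input = torch::randn({1, 3, 8, 8});    // N, C_in, H, W
  torch::Tensor weight = torch::randn({16, 3, 3, 3});  // C_out, C_in, kH, kW

  torch::Tensor out = F::conv2d(
      input, weight, F::Conv2dFuncOptions().stride(1).padding(1));
  std::cout << out.sizes() << '\n';  // [1, 16, 8, 8]

  // Transposed convolution; note the (C_in, C_out, kH, kW) weight layout.
  torch::Tensor back = F::conv_transpose2d(
      out,
      torch::randn({16, 3, 3, 3}),
      F::ConvTranspose2dFuncOptions().stride(1).padding(1));
  std::cout << back.sizes() << '\n';  // [1, 3, 8, 8]
  return 0;
}
```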
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/padding.h ADDED
@@ -0,0 +1,58 @@
1
+ #pragma once
2
+
3
+ #include <ATen/PadNd.h>
4
+ #include <torch/nn/options/padding.h>
5
+
6
+ namespace torch {
7
+ namespace nn {
8
+ namespace functional {
9
+
10
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
11
+ namespace detail {
12
+ inline Tensor pad(
13
+ const Tensor& input,
14
+ IntArrayRef pad,
15
+ PadFuncOptions::mode_t mode,
16
+ double value) {
17
+ const auto mode_enum = [&] {
18
+ if (std::holds_alternative<enumtype::kConstant>(mode)) {
19
+ return at::padding_mode::constant;
20
+ } else if (std::holds_alternative<enumtype::kReflect>(mode)) {
21
+ return at::padding_mode::reflect;
22
+ } else if (std::holds_alternative<enumtype::kReplicate>(mode)) {
23
+ return at::padding_mode::replicate;
24
+ } else if (std::holds_alternative<enumtype::kCircular>(mode)) {
25
+ return at::padding_mode::circular;
26
+ }
27
+ TORCH_CHECK(false, "Unrecognised padding mode");
28
+ }();
29
+
30
+ c10::optional<double> fill_value;
31
+ if (value != 0.0) {
32
+ fill_value = value;
33
+ }
34
+ return at::_pad_enum(input, pad, static_cast<int64_t>(mode_enum), fill_value);
35
+ }
36
+ } // namespace detail
37
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
38
+
39
+ /// See
40
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pad
41
+ /// about the exact behavior of this functional.
42
+ ///
43
+ /// See the documentation for `torch::nn::functional::PadFuncOptions` class to
44
+ /// learn what optional arguments are supported for this functional.
45
+ ///
46
+ /// Example:
47
+ /// ```
48
+ /// namespace F = torch::nn::functional;
49
+ /// F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1,
50
+ /// 2}).mode(torch::kReplicate));
51
+ /// ```
52
+ inline Tensor pad(const Tensor& input, const PadFuncOptions& options) {
53
+ return detail::pad(input, options.pad(), options.mode(), options.value());
54
+ }
55
+
56
+ } // namespace functional
57
+ } // namespace nn
58
+ } // namespace torch
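
A usage sketch (not part of the diff, assumes LibTorch); the pad widths apply to the last dimensions first, here (left, right, top, bottom):

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;

  torch::Tensor x = torch::arange(4, torch::kFloat).reshape({1, 1, 2, 2});

  // Constant padding (the default mode) with a fill value of -1.
  torch::Tensor padded = F::pad(x, F::PadFuncOptions({1, 1, 1, 1}).value(-1));
  std::cout << padded.sizes() << '\n';  // [1, 1, 4, 4]

  // Replicate padding repeats the border values instead.
  torch::Tensor replicated =
      F::pad(x, F::PadFuncOptions({1, 1, 1, 1}).mode(torch::kReplicate));
  std::cout << replicated[0][0] << '\n';
  return 0;
}
```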
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/upsampling.h ADDED
@@ -0,0 +1,290 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/nn/functional/pooling.h>
5
+ #include <torch/nn/options/upsampling.h>
6
+
7
+ #include <cmath>
8
+ #include <utility>
9
+
10
+ namespace torch {
11
+ namespace nn {
12
+ namespace functional {
13
+
14
+ inline std::vector<int64_t> _interp_output_size(
15
+ int64_t dim,
16
+ std::tuple<
17
+ Tensor,
18
+ c10::optional<std::vector<int64_t>>,
19
+ c10::optional<std::vector<double>>,
20
+ c10::optional<bool>> closed_over_args) {
21
+ Tensor input;
22
+ c10::optional<std::vector<int64_t>> size;
23
+ c10::optional<std::vector<double>> scale_factor;
24
+ c10::optional<bool> recompute_scale_factor;
25
+ std::tie(input, size, scale_factor, recompute_scale_factor) =
26
+ closed_over_args;
27
+ if (size == c10::nullopt && scale_factor == c10::nullopt) {
28
+ TORCH_CHECK(false, "either size or scale_factor should be defined");
29
+ }
30
+ if (size != c10::nullopt && scale_factor != c10::nullopt) {
31
+ TORCH_CHECK(false, "only one of size or scale_factor should be defined");
32
+ }
33
+ if (scale_factor != c10::nullopt) {
34
+ if (static_cast<int64_t>(scale_factor.value().size()) != dim) {
35
+ TORCH_CHECK(
36
+ false,
37
+ "scale_factor shape must match input shape. ",
38
+ "Input is ",
39
+ dim,
40
+ "D, scale_factor size is ",
41
+ torch::ArrayRef<double>(*scale_factor));
42
+ }
43
+ }
44
+ if (size != c10::nullopt) {
45
+ return *size;
46
+ }
47
+
48
+ TORCH_INTERNAL_ASSERT(scale_factor != c10::nullopt);
49
+ auto scale_factors = *scale_factor;
50
+
51
+ if (recompute_scale_factor == c10::nullopt) {
52
+ // only warn when the scales have floating values since
53
+ // the result for ints is the same with/without recompute_scale_factor
54
+ bool is_float_scale_factor = false;
55
+ for (double scale : scale_factors) {
56
+ is_float_scale_factor = floor(scale) != scale;
57
+ if (is_float_scale_factor) {
58
+ break;
59
+ }
60
+ }
61
+ if (is_float_scale_factor) {
62
+ TORCH_WARN(
63
+ "The default behavior for interpolate/upsample with float scale_factor changed "
64
+ "in 1.6.0 to align with other frameworks/libraries, and uses scale_factor directly, "
65
+ "instead of relying on the computed output size. "
66
+ "If you wish to keep the old behavior, please set recompute_scale_factor=True. "
67
+ "See the documentation of nn.Upsample for details. ");
68
+ }
69
+ }
70
+
71
+ std::vector<int64_t> ret;
72
+ for (const auto i : c10::irange(dim)) {
73
+ ret.emplace_back(static_cast<int64_t>(
74
+ floor(static_cast<double>(input.size(i + 2)) * scale_factors[i])));
75
+ }
76
+ return ret;
77
+ }
78
+
79
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
80
+ namespace detail {
81
+ inline Tensor interpolate(
82
+ const Tensor& input,
83
+ const c10::optional<std::vector<int64_t>>& size,
84
+ const c10::optional<std::vector<double>>& scale_factor,
85
+ InterpolateFuncOptions::mode_t mode,
86
+ c10::optional<bool> align_corners,
87
+ c10::optional<bool> recompute_scale_factor,
88
+ bool antialias) {
89
+ if (std::holds_alternative<enumtype::kNearest>(mode) ||
90
+ std::get_if<enumtype::kArea>(&mode)) {
91
+ if (align_corners != c10::nullopt) {
92
+ TORCH_CHECK(
93
+ false,
94
+ "align_corners option can only be set with the "
95
+ "interpolating modes: linear | bilinear | bicubic | trilinear");
96
+ }
97
+ } else {
98
+ if (align_corners == c10::nullopt) {
99
+ TORCH_WARN(
100
+ "Default upsampling behavior when mode=",
101
+ enumtype::get_enum_name(mode),
102
+ " is changed "
103
+ "to align_corners=False since 0.4.0. Please specify "
104
+ "align_corners=True if the old behavior is desired. "
105
+ "See the documentation of nn.Upsample for details.");
106
+ align_corners = false;
107
+ }
108
+ }
109
+
110
+ TORCH_CHECK(
111
+ input.dim() >= 3 && input.dim() <= 5,
112
+ "Input Error: Only 3D, 4D and 5D input Tensors supported "
113
+ "(got ",
114
+ input.dim(),
115
+ "D) for the modes: nearest | linear | bilinear | bicubic | trilinear "
116
+ "(got ",
117
+ enumtype::get_enum_name(mode),
118
+ ")");
119
+
120
+ auto scale_factor_len = input.dim() - 2;
121
+ std::vector<c10::optional<double>> scale_factor_list(
122
+ scale_factor_len, c10::nullopt);
123
+ if (scale_factor != c10::nullopt && !recompute_scale_factor.value_or(false)) {
124
+ auto _scale_factor_repeated = *scale_factor;
125
+ scale_factor_list = {};
126
+ for (const auto& elem : _scale_factor_repeated) {
127
+ scale_factor_list.emplace_back(elem);
128
+ }
129
+ }
130
+
131
+ if (antialias &&
132
+ !(input.dim() == 4 &&
133
+ (std::get_if<enumtype::kBilinear>(&mode) ||
134
+ std::get_if<enumtype::kBicubic>(&mode)))) {
135
+ TORCH_CHECK(
136
+ false,
137
+ "Anti-alias option is only supported for bilinear and bicubic modes");
138
+ }
139
+
140
+ auto closed_over_args =
141
+ std::make_tuple(input, size, scale_factor, recompute_scale_factor);
142
+ if (input.dim() == 3 && std::get_if<enumtype::kNearest>(&mode)) {
143
+ return torch::upsample_nearest1d(
144
+ input,
145
+ _interp_output_size(1, std::move(closed_over_args)),
146
+ scale_factor_list.at(0));
147
+ } else if (input.dim() == 4 && std::get_if<enumtype::kNearest>(&mode)) {
148
+ return torch::upsample_nearest2d(
149
+ input,
150
+ _interp_output_size(2, std::move(closed_over_args)),
151
+ scale_factor_list.at(0),
152
+ scale_factor_list.at(1));
153
+ } else if (input.dim() == 5 && std::get_if<enumtype::kNearest>(&mode)) {
154
+ return torch::upsample_nearest3d(
155
+ input,
156
+ _interp_output_size(3, std::move(closed_over_args)),
157
+ scale_factor_list.at(0),
158
+ scale_factor_list.at(1),
159
+ scale_factor_list.at(2));
160
+ } else if (input.dim() == 3 && std::get_if<enumtype::kNearestExact>(&mode)) {
161
+ return torch::_upsample_nearest_exact1d(
162
+ input,
163
+ _interp_output_size(1, std::move(closed_over_args)),
164
+ scale_factor_list.at(0));
165
+ } else if (input.dim() == 4 && std::get_if<enumtype::kNearestExact>(&mode)) {
166
+ return torch::_upsample_nearest_exact2d(
167
+ input,
168
+ _interp_output_size(2, std::move(closed_over_args)),
169
+ scale_factor_list.at(0),
170
+ scale_factor_list.at(1));
171
+ } else if (input.dim() == 5 && std::get_if<enumtype::kNearestExact>(&mode)) {
172
+ return torch::_upsample_nearest_exact3d(
173
+ input,
174
+ _interp_output_size(3, std::move(closed_over_args)),
175
+ scale_factor_list.at(0),
176
+ scale_factor_list.at(1),
177
+ scale_factor_list.at(2));
178
+ } else if (input.dim() == 3 && std::get_if<enumtype::kArea>(&mode)) {
179
+ return detail::adaptive_avg_pool1d(
180
+ input, _interp_output_size(1, std::move(closed_over_args)));
181
+ } else if (input.dim() == 4 && std::get_if<enumtype::kArea>(&mode)) {
182
+ return detail::adaptive_avg_pool2d(
183
+ input, _interp_output_size(2, std::move(closed_over_args)));
184
+ } else if (input.dim() == 5 && std::get_if<enumtype::kArea>(&mode)) {
185
+ return detail::adaptive_avg_pool3d(
186
+ input, _interp_output_size(3, std::move(closed_over_args)));
187
+ } else if (input.dim() == 3 && std::get_if<enumtype::kLinear>(&mode)) {
188
+ TORCH_INTERNAL_ASSERT(align_corners != c10::nullopt);
189
+ return torch::upsample_linear1d(
190
+ input,
191
+ _interp_output_size(1, std::move(closed_over_args)),
192
+ *align_corners,
193
+ scale_factor_list.at(0));
194
+ } else if (input.dim() == 3 && std::get_if<enumtype::kBilinear>(&mode)) {
195
+ TORCH_CHECK(false, "Got 3D input, but bilinear mode needs 4D input");
196
+ } else if (input.dim() == 3 && std::get_if<enumtype::kTrilinear>(&mode)) {
197
+ TORCH_CHECK(false, "Got 3D input, but trilinear mode needs 5D input");
198
+ } else if (input.dim() == 4 && std::get_if<enumtype::kLinear>(&mode)) {
199
+ TORCH_CHECK(false, "Got 4D input, but linear mode needs 3D input");
200
+ } else if (input.dim() == 4 && std::get_if<enumtype::kBilinear>(&mode)) {
201
+ TORCH_INTERNAL_ASSERT(align_corners != c10::nullopt);
202
+ if (antialias) {
203
+ return torch::_upsample_bilinear2d_aa(
204
+ input,
205
+ _interp_output_size(2, std::move(closed_over_args)),
206
+ *align_corners,
207
+ scale_factor_list.at(0),
208
+ scale_factor_list.at(1));
209
+ }
210
+ return torch::upsample_bilinear2d(
211
+ input,
212
+ _interp_output_size(2, std::move(closed_over_args)),
213
+ *align_corners,
214
+ scale_factor_list.at(0),
215
+ scale_factor_list.at(1));
216
+ } else if (input.dim() == 4 && std::get_if<enumtype::kTrilinear>(&mode)) {
217
+ TORCH_CHECK(false, "Got 4D input, but trilinear mode needs 5D input");
218
+ } else if (input.dim() == 5 && std::get_if<enumtype::kLinear>(&mode)) {
219
+ TORCH_CHECK(false, "Got 5D input, but linear mode needs 3D input");
220
+ } else if (input.dim() == 5 && std::get_if<enumtype::kBilinear>(&mode)) {
221
+ TORCH_CHECK(false, "Got 5D input, but bilinear mode needs 4D input");
222
+ } else if (input.dim() == 5 && std::get_if<enumtype::kTrilinear>(&mode)) {
223
+ TORCH_INTERNAL_ASSERT(align_corners != c10::nullopt);
224
+ return torch::upsample_trilinear3d(
225
+ input,
226
+ _interp_output_size(3, std::move(closed_over_args)),
227
+ *align_corners,
228
+ scale_factor_list.at(0),
229
+ scale_factor_list.at(1),
230
+ scale_factor_list.at(2));
231
+ } else if (input.dim() == 4 && std::get_if<enumtype::kBicubic>(&mode)) {
232
+ TORCH_INTERNAL_ASSERT(align_corners != c10::nullopt);
233
+ if (antialias) {
234
+ return torch::_upsample_bicubic2d_aa(
235
+ input,
236
+ _interp_output_size(2, std::move(closed_over_args)),
237
+ *align_corners,
238
+ scale_factor_list.at(0),
239
+ scale_factor_list.at(1));
240
+ }
241
+ return torch::upsample_bicubic2d(
242
+ input,
243
+ _interp_output_size(2, std::move(closed_over_args)),
244
+ *align_corners,
245
+ scale_factor_list.at(0),
246
+ scale_factor_list.at(1));
247
+ } else {
248
+ TORCH_CHECK(
249
+ false,
250
+ "Input Error: Only 3D, 4D and 5D input Tensors supported "
251
+ "(got ",
252
+ input.dim(),
253
+ "D) for the modes: nearest | linear | bilinear | bicubic | trilinear "
254
+ "(got ",
255
+ enumtype::get_enum_name(mode),
256
+ ")");
257
+ }
258
+ }
259
+ } // namespace detail
260
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
261
+
262
+ /// See
263
+ /// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.interpolate
264
+ /// about the exact behavior of this functional.
265
+ ///
266
+ /// See the documentation for `torch::nn::functional::InterpolateFuncOptions`
267
+ /// class to learn what optional arguments are supported for this functional.
268
+ ///
269
+ /// Example:
270
+ /// ```
271
+ /// namespace F = torch::nn::functional;
272
+ /// F::interpolate(input,
273
+ /// F::InterpolateFuncOptions().size({4}).mode(torch::kNearest));
274
+ /// ```
275
+ inline Tensor interpolate(
276
+ const Tensor& input,
277
+ const InterpolateFuncOptions& options = {}) {
278
+ return detail::interpolate(
279
+ input,
280
+ options.size(),
281
+ options.scale_factor(),
282
+ options.mode(),
283
+ options.align_corners(),
284
+ options.recompute_scale_factor(),
285
+ options.antialias());
286
+ }
287
+
288
+ } // namespace functional
289
+ } // namespace nn
290
+ } // namespace torch
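
A usage sketch (not part of the diff, assumes LibTorch): resize once by explicit output size and once by scale factor; the values are illustrative.

```
#include <torch/torch.h>
#include <iostream>
#include <vector>

int main() {
  namespace F = torch::nn::functional;

  torch::Tensor x = torch::randn({1, 3, 16, 16});  // N, C, H, W

  // Resize to an explicit spatial size with nearest-neighbour sampling.
  torch::Tensor by_size = F::interpolate(
      x,
      F::InterpolateFuncOptions()
          .size(std::vector<int64_t>{32, 32})
          .mode(torch::kNearest));
  std::cout << by_size.sizes() << '\n';  // [1, 3, 32, 32]

  // Or scale by a factor; align_corners is set explicitly to avoid the
  // behavior warning described above for the interpolating modes.
  torch::Tensor by_scale = F::interpolate(
      x,
      F::InterpolateFuncOptions()
          .scale_factor(std::vector<double>{0.5, 0.5})
          .mode(torch::kBilinear)
          .align_corners(false));
  std::cout << by_scale.sizes() << '\n';  // [1, 3, 8, 8]
  return 0;
}
```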
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/_functions.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/custom_function.h>
4
+ #include <torch/csrc/autograd/variable.h>
5
+ #include <torch/nn/options/normalization.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+ namespace functions {
11
+
12
+ class CrossMapLRN2d : public torch::autograd::Function<CrossMapLRN2d> {
13
+ public:
14
+ static torch::autograd::Variable forward(
15
+ torch::autograd::AutogradContext* ctx,
16
+ const torch::autograd::Variable& input,
17
+ const CrossMapLRN2dOptions& options);
18
+
19
+ static torch::autograd::variable_list backward(
20
+ torch::autograd::AutogradContext* ctx,
21
+ torch::autograd::variable_list grad_output);
22
+ };
23
+
24
+ } // namespace functions
25
+ } // namespace nn
26
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/activation.h ADDED
@@ -0,0 +1,875 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/activation.h>
5
+ #include <torch/nn/modules/common.h>
6
+ #include <torch/nn/modules/linear.h>
7
+ #include <torch/nn/options/activation.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+
11
+ namespace torch {
12
+ namespace nn {
13
+
14
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
15
+
16
+ /// Applies elu over a given input.
17
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ELU to learn
18
+ /// about the exact behavior of this module.
19
+ ///
20
+ /// See the documentation for `torch::nn::ELUOptions` class to learn what
21
+ /// constructor arguments are supported for this module.
22
+ ///
23
+ /// Example:
24
+ /// ```
25
+ /// ELU model(ELUOptions().alpha(42.42).inplace(true));
26
+ /// ```
27
+ class TORCH_API ELUImpl : public torch::nn::Cloneable<ELUImpl> {
28
+ public:
29
+ explicit ELUImpl(const ELUOptions& options_ = {});
30
+
31
+ Tensor forward(Tensor input);
32
+
33
+ void reset() override;
34
+
35
+ /// Pretty prints the `ELU` module into the given `stream`.
36
+ void pretty_print(std::ostream& stream) const override;
37
+
38
+ /// The options with which this `Module` was constructed.
39
+ ELUOptions options;
40
+ };
41
+
42
+ /// A `ModuleHolder` subclass for `ELUImpl`.
43
+ /// See the documentation for `ELUImpl` class to learn what methods it
44
+ /// provides, and examples of how to use `ELU` with `torch::nn::ELUOptions`.
45
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
46
+ /// module storage semantics.
47
+ TORCH_MODULE(ELU);
48
+
49
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
50
+
51
+ /// Applies the selu function element-wise.
52
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.SELU to learn
53
+ /// about the exact behavior of this module.
54
+ ///
55
+ /// See the documentation for `torch::nn::SELUOptions` class to learn what
56
+ /// constructor arguments are supported for this module.
57
+ ///
58
+ /// Example:
59
+ /// ```
60
+ /// SELU model(SELUOptions().inplace(true));
61
+ /// ```
62
+ class TORCH_API SELUImpl : public torch::nn::Cloneable<SELUImpl> {
63
+ public:
64
+ explicit SELUImpl(const SELUOptions& options_ = {});
65
+
66
+ Tensor forward(Tensor input);
67
+
68
+ void reset() override;
69
+
70
+ /// Pretty prints the `SELU` module into the given `stream`.
71
+ void pretty_print(std::ostream& stream) const override;
72
+
73
+ /// The options with which this `Module` was constructed.
74
+ SELUOptions options;
75
+ };
76
+
77
+ /// A `ModuleHolder` subclass for `SELUImpl`.
78
+ /// See the documentation for `SELUImpl` class to learn what methods it
79
+ /// provides, and examples of how to use `SELU` with `torch::nn::SELUOptions`.
80
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
81
+ /// module storage semantics.
82
+ TORCH_MODULE(SELU);
83
+
84
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
85
+
86
+ /// Applies the hard shrinkage function element-wise.
87
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Hardshrink to learn
88
+ /// about the exact behavior of this module.
89
+ ///
90
+ /// See the documentation for `torch::nn::HardshrinkOptions` class to learn what
91
+ /// constructor arguments are supported for this module.
92
+ ///
93
+ /// Example:
94
+ /// ```
95
+ /// Hardshrink model(HardshrinkOptions().lambda(42.42));
96
+ /// ```
97
+ class TORCH_API HardshrinkImpl : public torch::nn::Cloneable<HardshrinkImpl> {
98
+ public:
99
+ explicit HardshrinkImpl(const HardshrinkOptions& options_ = {});
100
+
101
+ Tensor forward(const Tensor& input);
102
+
103
+ void reset() override;
104
+
105
+ /// Pretty prints the `Hardshrink` module into the given `stream`.
106
+ void pretty_print(std::ostream& stream) const override;
107
+
108
+ /// The options with which this `Module` was constructed.
109
+ HardshrinkOptions options;
110
+ };
111
+
112
+ /// A `ModuleHolder` subclass for `HardshrinkImpl`.
113
+ /// See the documentation for `HardshrinkImpl` class to learn what methods it
114
+ /// provides, and examples of how to use `Hardshrink` with
115
+ /// `torch::nn::HardshrinkOptions`. See the documentation for `ModuleHolder` to
116
+ /// learn about PyTorch's module storage semantics.
117
+ TORCH_MODULE(Hardshrink);
118
+
119
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardtanh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
120
+
121
+ /// Applies the HardTanh function element-wise.
122
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Hardtanh to learn
123
+ /// about the exact behavior of this module.
124
+ ///
125
+ /// See the documentation for `torch::nn::HardtanhOptions` class to learn what
126
+ /// constructor arguments are supported for this module.
127
+ ///
128
+ /// Example:
129
+ /// ```
130
+ /// Hardtanh
131
+ /// model(HardtanhOptions().min_val(-42.42).max_val(0.42).inplace(true));
132
+ /// ```
133
+ class TORCH_API HardtanhImpl : public torch::nn::Cloneable<HardtanhImpl> {
134
+ public:
135
+ explicit HardtanhImpl(const HardtanhOptions& options_ = {});
136
+
137
+ Tensor forward(Tensor input);
138
+
139
+ void reset() override;
140
+
141
+ /// Pretty prints the `Hardtanh` module into the given `stream`.
142
+ void pretty_print(std::ostream& stream) const override;
143
+
144
+ /// The options with which this `Module` was constructed.
145
+ HardtanhOptions options;
146
+ };
147
+
148
+ /// A `ModuleHolder` subclass for `HardtanhImpl`.
149
+ /// See the documentation for `HardtanhImpl` class to learn what methods it
150
+ /// provides, and examples of how to use `Hardtanh` with
151
+ /// `torch::nn::HardtanhOptions`. See the documentation for `ModuleHolder` to
152
+ /// learn about PyTorch's module storage semantics.
153
+ TORCH_MODULE(Hardtanh);
154
+
155
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LeakyReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
156
+
157
+ /// Applies the LeakyReLU function element-wise.
158
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.LeakyReLU to learn
159
+ /// about the exact behavior of this module.
160
+ ///
161
+ /// See the documentation for `torch::nn::LeakyReLUOptions` class to learn what
162
+ /// constructor arguments are supported for this module.
163
+ ///
164
+ /// Example:
165
+ /// ```
166
+ /// LeakyReLU model(LeakyReLUOptions().negative_slope(0.42).inplace(true));
167
+ /// ```
168
+ class TORCH_API LeakyReLUImpl : public torch::nn::Cloneable<LeakyReLUImpl> {
169
+ public:
170
+ explicit LeakyReLUImpl(const LeakyReLUOptions& options_ = {});
171
+
172
+ Tensor forward(Tensor input);
173
+
174
+ void reset() override;
175
+
176
+ /// Pretty prints the `LeakyReLU` module into the given `stream`.
177
+ void pretty_print(std::ostream& stream) const override;
178
+
179
+ /// The options with which this `Module` was constructed.
180
+ LeakyReLUOptions options;
181
+ };
182
+
183
+ /// A `ModuleHolder` subclass for `LeakyReLUImpl`.
184
+ /// See the documentation for `LeakyReLUImpl` class to learn what methods it
185
+ /// provides, and examples of how to use `LeakyReLU` with
186
+ /// `torch::nn::LeakyReLUOptions`. See the documentation for `ModuleHolder` to
187
+ /// learn about PyTorch's module storage semantics.
188
+ TORCH_MODULE(LeakyReLU);
189
+
190
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LogSigmoid ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
191
+
192
+ /// Applies the LogSigmoid function element-wise.
193
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.LogSigmoid to learn
194
+ /// about the exact behavior of this module.
195
+ class TORCH_API LogSigmoidImpl : public torch::nn::Cloneable<LogSigmoidImpl> {
196
+ public:
197
+ Tensor forward(const Tensor& input);
198
+
199
+ void reset() override;
200
+
201
+ /// Pretty prints the `LogSigmoid` module into the given `stream`.
202
+ void pretty_print(std::ostream& stream) const override;
203
+ };
204
+
205
+ /// A `ModuleHolder` subclass for `LogSigmoidImpl`.
206
+ /// See the documentation for `LogSigmoidImpl` class to learn what methods it
207
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
208
+ /// module storage semantics.
209
+ TORCH_MODULE(LogSigmoid);
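+
+ /// A minimal usage sketch (illustrative, not from the upstream header); the
+ /// same pattern applies to the other options-free activation modules declared
+ /// below (e.g. `Sigmoid`, `Tanh`, `SiLU`, `Mish`):
+ /// ```
+ /// LogSigmoid model;
+ /// auto y = model(torch::randn({2, 3}));  // y = log(sigmoid(x)), elementwise
+ /// ```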
210
+
211
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
212
+
213
+ /// Applies the Softmax function.
214
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softmax to learn
215
+ /// about the exact behavior of this module.
216
+ ///
217
+ /// See the documentation for `torch::nn::SoftmaxOptions` class to learn what
218
+ /// constructor arguments are supported for this module.
219
+ ///
220
+ /// Example:
221
+ /// ```
222
+ /// Softmax model(SoftmaxOptions(1));
223
+ /// ```
224
+ class TORCH_API SoftmaxImpl : public torch::nn::Cloneable<SoftmaxImpl> {
225
+ public:
226
+ explicit SoftmaxImpl(int64_t dim) : SoftmaxImpl(SoftmaxOptions(dim)) {}
227
+ explicit SoftmaxImpl(const SoftmaxOptions& options_);
228
+
229
+ Tensor forward(const Tensor& input);
230
+
231
+ void reset() override;
232
+
233
+ /// Pretty prints the `Softmax` module into the given `stream`.
234
+ void pretty_print(std::ostream& stream) const override;
235
+
236
+ SoftmaxOptions options;
237
+ };
238
+
239
+ /// A `ModuleHolder` subclass for `SoftmaxImpl`.
240
+ /// See the documentation for `SoftmaxImpl` class to learn what methods it
241
+ /// provides, and examples of how to use `Softmax` with
242
+ /// `torch::nn::SoftmaxOptions`. See the documentation for `ModuleHolder` to
243
+ /// learn about PyTorch's module storage semantics.
244
+ TORCH_MODULE(Softmax);
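+
+ /// A minimal usage sketch (illustrative, not from the upstream header): with
+ /// `SoftmaxOptions(1)` the normalization runs over dimension 1, so each row
+ /// of the output sums to 1:
+ /// ```
+ /// Softmax softmax(SoftmaxOptions(1));
+ /// auto p = softmax(torch::randn({2, 5}));  // p.sum(1) is (approximately) all ones
+ /// ```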
245
+
246
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmin ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
247
+
248
+ /// Applies the Softmin function element-wise.
249
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softmin to learn
250
+ /// about the exact behavior of this module.
251
+ ///
252
+ /// See the documentation for `torch::nn::SoftminOptions` class to learn what
253
+ /// constructor arguments are supported for this module.
254
+ ///
255
+ /// Example:
256
+ /// ```
257
+ /// Softmin model(SoftminOptions(1));
258
+ /// ```
259
+ class TORCH_API SoftminImpl : public torch::nn::Cloneable<SoftminImpl> {
260
+ public:
261
+ explicit SoftminImpl(int64_t dim) : SoftminImpl(SoftminOptions(dim)) {}
262
+ explicit SoftminImpl(const SoftminOptions& options_);
263
+
264
+ Tensor forward(const Tensor& input);
265
+
266
+ void reset() override;
267
+
268
+ /// Pretty prints the `Softmin` module into the given `stream`.
269
+ void pretty_print(std::ostream& stream) const override;
270
+
271
+ SoftminOptions options;
272
+ };
273
+
274
+ /// A `ModuleHolder` subclass for `SoftminImpl`.
275
+ /// See the documentation for `SoftminImpl` class to learn what methods it
276
+ /// provides, and examples of how to use `Softmin` with
277
+ /// `torch::nn::SoftminOptions`. See the documentation for `ModuleHolder` to
278
+ /// learn about PyTorch's module storage semantics.
279
+ TORCH_MODULE(Softmin);
280
+
281
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LogSoftmax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
282
+
283
+ /// Applies the LogSoftmax function element-wise.
284
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.LogSoftmax to learn
285
+ /// about the exact behavior of this module.
286
+ ///
287
+ /// See the documentation for `torch::nn::LogSoftmaxOptions` class to learn what
288
+ /// constructor arguments are supported for this module.
289
+ ///
290
+ /// Example:
291
+ /// ```
292
+ /// LogSoftmax model(LogSoftmaxOptions(1));
293
+ /// ```
294
+ class TORCH_API LogSoftmaxImpl : public torch::nn::Cloneable<LogSoftmaxImpl> {
295
+ public:
296
+ explicit LogSoftmaxImpl(int64_t dim)
297
+ : LogSoftmaxImpl(LogSoftmaxOptions(dim)) {}
298
+ explicit LogSoftmaxImpl(const LogSoftmaxOptions& options_);
299
+
300
+ Tensor forward(const Tensor& input);
301
+
302
+ void reset() override;
303
+
304
+ /// Pretty prints the `LogSoftmax` module into the given `stream`.
305
+ void pretty_print(std::ostream& stream) const override;
306
+
307
+ LogSoftmaxOptions options;
308
+ };
309
+
310
+ /// A `ModuleHolder` subclass for `LogSoftmaxImpl`.
311
+ /// See the documentation for `LogSoftmaxImpl` class to learn what methods it
312
+ /// provides, and examples of how to use `LogSoftmax` with
313
+ /// `torch::nn::LogSoftmaxOptions`. See the documentation for `ModuleHolder` to
314
+ /// learn about PyTorch's module storage semantics.
315
+ TORCH_MODULE(LogSoftmax);
316
+
317
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softmax2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
318
+
319
+ /// Applies the Softmax2d function element-wise.
320
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softmax2d to learn
321
+ /// about the exact behavior of this module.
322
+ class TORCH_API Softmax2dImpl : public torch::nn::Cloneable<Softmax2dImpl> {
323
+ public:
324
+ Tensor forward(const Tensor& input);
325
+
326
+ void reset() override;
327
+
328
+ /// Pretty prints the `Softmax2d` module into the given `stream`.
329
+ void pretty_print(std::ostream& stream) const override;
330
+ };
331
+
332
+ /// A `ModuleHolder` subclass for `Softmax2dImpl`.
333
+ /// See the documentation for `Softmax2dImpl` class to learn what methods it
334
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
335
+ /// module storage semantics.
336
+ TORCH_MODULE(Softmax2d);
337
+
338
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
339
+
340
+ /// Applies the PReLU function element-wise.
341
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.PReLU to learn
342
+ /// about the exact behavior of this module.
343
+ ///
344
+ /// See the documentation for `torch::nn::PReLUOptions` class to learn what
345
+ /// constructor arguments are supported for this module.
346
+ ///
347
+ /// Example:
348
+ /// ```
349
+ /// PReLU model(PReLUOptions().num_parameters(42));
350
+ /// ```
351
+ class TORCH_API PReLUImpl : public torch::nn::Cloneable<PReLUImpl> {
352
+ public:
353
+ explicit PReLUImpl(const PReLUOptions& options_ = {});
354
+
355
+ Tensor forward(const Tensor& input);
356
+
357
+ void reset() override;
358
+
359
+ /// Pretty prints the `PReLU` module into the given `stream`.
360
+ void pretty_print(std::ostream& stream) const override;
361
+
362
+ /// The options with which this `Module` was constructed.
363
+ PReLUOptions options;
364
+
365
+ /// The learned weight.
366
+ Tensor weight;
367
+ };
368
+
369
+ /// A `ModuleHolder` subclass for `PReLUImpl`.
370
+ /// See the documentation for `PReLUImpl` class to learn what methods it
371
+ /// provides, and examples of how to use `PReLU` with `torch::nn::PReLUOptions`.
372
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
373
+ /// module storage semantics.
374
+ TORCH_MODULE(PReLU);
375
+
376
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
377
+
378
+ /// Applies the ReLU function element-wise.
379
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReLU to learn
380
+ /// about the exact behavior of this module.
381
+ ///
382
+ /// See the documentation for `torch::nn::ReLUOptions` class to learn what
383
+ /// constructor arguments are supported for this module.
384
+ ///
385
+ /// Example:
386
+ /// ```
387
+ /// ReLU model(ReLUOptions().inplace(true));
388
+ /// ```
389
+ class TORCH_API ReLUImpl : public torch::nn::Cloneable<ReLUImpl> {
390
+ public:
391
+ explicit ReLUImpl(const ReLUOptions& options_ = {});
392
+
393
+ Tensor forward(Tensor input);
394
+
395
+ void reset() override;
396
+
397
+ /// Pretty prints the `ReLU` module into the given `stream`.
398
+ void pretty_print(std::ostream& stream) const override;
399
+
400
+ /// The options with which this `Module` was constructed.
401
+ ReLUOptions options;
402
+ };
403
+
404
+ /// A `ModuleHolder` subclass for `ReLUImpl`.
405
+ /// See the documentation for `ReLUImpl` class to learn what methods it
406
+ /// provides, and examples of how to use `ReLU` with `torch::nn::ReLUOptions`.
407
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
408
+ /// module storage semantics.
409
+ TORCH_MODULE(ReLU);
410
+
411
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReLU6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
412
+
413
+ /// Applies the ReLU6 function element-wise.
414
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReLU6 to learn
415
+ /// about the exact behavior of this module.
416
+ ///
417
+ /// See the documentation for `torch::nn::ReLU6Options` class to learn what
418
+ /// constructor arguments are supported for this module.
419
+ ///
420
+ /// Example:
421
+ /// ```
422
+ /// ReLU6 model(ReLU6Options().inplace(true));
423
+ /// ```
424
+ class TORCH_API ReLU6Impl : public torch::nn::Cloneable<ReLU6Impl> {
425
+ public:
426
+ explicit ReLU6Impl(const ReLU6Options& options_ = {});
427
+
428
+ Tensor forward(Tensor input);
429
+
430
+ void reset() override;
431
+
432
+ /// Pretty prints the `ReLU6` module into the given `stream`.
433
+ void pretty_print(std::ostream& stream) const override;
434
+
435
+ /// The options with which this `Module` was constructed.
436
+ ReLU6Options options;
437
+ };
438
+
439
+ /// A `ModuleHolder` subclass for `ReLU6Impl`.
440
+ /// See the documentation for `ReLU6Impl` class to learn what methods it
441
+ /// provides, and examples of how to use `ReLU6` with `torch::nn::ReLU6Options`.
442
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
443
+ /// module storage semantics.
444
+ TORCH_MODULE(ReLU6);
445
+
446
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RReLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
447
+
448
+ /// Applies the RReLU function element-wise.
449
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.RReLU to learn
450
+ /// about the exact behavior of this module.
451
+ ///
452
+ /// See the documentation for `torch::nn::RReLUOptions` class to learn what
453
+ /// constructor arguments are supported for this module.
454
+ ///
455
+ /// Example:
456
+ /// ```
457
+ /// RReLU model(RReLUOptions().lower(0.24).upper(0.42).inplace(true));
458
+ /// ```
459
+ class TORCH_API RReLUImpl : public torch::nn::Cloneable<RReLUImpl> {
460
+ public:
461
+ explicit RReLUImpl(const RReLUOptions& options_ = {});
462
+
463
+ Tensor forward(Tensor input);
464
+
465
+ void reset() override;
466
+
467
+ /// Pretty prints the `RReLU` module into the given `stream`.
468
+ void pretty_print(std::ostream& stream) const override;
469
+
470
+ /// The options with which this `Module` was constructed.
471
+ RReLUOptions options;
472
+ };
473
+
474
+ /// A `ModuleHolder` subclass for `RReLUImpl`.
475
+ /// See the documentation for `RReLUImpl` class to learn what methods it
476
+ /// provides, and examples of how to use `RReLU` with `torch::nn::RReLUOptions`.
477
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
478
+ /// module storage semantics.
479
+ TORCH_MODULE(RReLU);
480
+
481
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
482
+
483
+ /// Applies celu over a given input.
484
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.CELU to learn
485
+ /// about the exact behavior of this module.
486
+ ///
487
+ /// See the documentation for `torch::nn::CELUOptions` class to learn what
488
+ /// constructor arguments are supported for this module.
489
+ ///
490
+ /// Example:
491
+ /// ```
492
+ /// CELU model(CELUOptions().alpha(42.42).inplace(true));
493
+ /// ```
494
+ class TORCH_API CELUImpl : public torch::nn::Cloneable<CELUImpl> {
495
+ public:
496
+ explicit CELUImpl(const CELUOptions& options_ = {});
497
+
498
+ Tensor forward(Tensor input);
499
+
500
+ void reset() override;
501
+
502
+ /// Pretty prints the `CELU` module into the given `stream`.
503
+ void pretty_print(std::ostream& stream) const override;
504
+
505
+ /// The options with which this `Module` was constructed.
506
+ CELUOptions options;
507
+ };
508
+
509
+ /// A `ModuleHolder` subclass for `CELUImpl`.
510
+ /// See the documentation for `CELUImpl` class to learn what methods it
511
+ /// provides, and examples of how to use `CELU` with `torch::nn::CELUOptions`.
512
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
513
+ /// module storage semantics.
514
+ TORCH_MODULE(CELU);
515
+
516
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
517
+
518
+ /// Applies glu over a given input.
519
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.GLU to learn
520
+ /// about the exact behavior of this module.
521
+ ///
522
+ /// See the documentation for `torch::nn::GLUOptions` class to learn what
523
+ /// constructor arguments are supported for this module.
524
+ ///
525
+ /// Example:
526
+ /// ```
527
+ /// GLU model(GLUOptions(1));
528
+ /// ```
529
+ class TORCH_API GLUImpl : public torch::nn::Cloneable<GLUImpl> {
530
+ public:
531
+ explicit GLUImpl(const GLUOptions& options_ = {});
532
+
533
+ Tensor forward(const Tensor& input);
534
+
535
+ void reset() override;
536
+
537
+ /// Pretty prints the `GLU` module into the given `stream`.
538
+ void pretty_print(std::ostream& stream) const override;
539
+
540
+ /// The options with which this `Module` was constructed.
541
+ GLUOptions options;
542
+ };
543
+
544
+ /// A `ModuleHolder` subclass for `GLUImpl`.
545
+ /// See the documentation for `GLUImpl` class to learn what methods it
546
+ /// provides, and examples of how to use `GLU` with `torch::nn::GLUOptions`.
547
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
548
+ /// module storage semantics.
549
+ TORCH_MODULE(GLU);
550
+
551
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
552
+
553
+ /// Applies gelu over a given input.
554
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.GELU to learn
555
+ /// about the exact behavior of this module.
556
+ class TORCH_API GELUImpl : public torch::nn::Cloneable<GELUImpl> {
557
+ public:
558
+ explicit GELUImpl(GELUOptions options_ = {});
559
+
560
+ Tensor forward(const Tensor& input);
561
+
562
+ void reset() override;
563
+
564
+ /// Pretty prints the `GELU` module into the given `stream`.
565
+ void pretty_print(std::ostream& stream) const override;
566
+
567
+ /// The options with which this `Module` was constructed.
568
+ GELUOptions options;
569
+ };
570
+
571
+ /// A `ModuleHolder` subclass for `GELUImpl`.
572
+ /// See the documentation for `GELUImpl` class to learn what methods it
573
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
574
+ /// module storage semantics.
575
+ TORCH_MODULE(GELU);
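+
+ /// A minimal usage sketch (illustrative, not from the upstream header; it
+ /// assumes `GELUOptions` exposes the string-valued `approximate` setter):
+ /// ```
+ /// GELU gelu(GELUOptions().approximate("tanh"));
+ /// auto y = gelu(torch::randn({2, 3}));
+ /// ```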
576
+
577
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SiLU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
578
+
579
+ /// Applies silu over a given input.
580
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.SiLU to learn
581
+ /// about the exact behavior of this module.
582
+ class TORCH_API SiLUImpl : public torch::nn::Cloneable<SiLUImpl> {
583
+ public:
584
+ Tensor forward(const Tensor& input);
585
+
586
+ void reset() override;
587
+
588
+ /// Pretty prints the `SiLU` module into the given `stream`.
589
+ void pretty_print(std::ostream& stream) const override;
590
+ };
591
+
592
+ /// A `ModuleHolder` subclass for `SiLUImpl`.
593
+ /// See the documentation for `SiLUImpl` class to learn what methods it
594
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
595
+ /// module storage semantics.
596
+ TORCH_MODULE(SiLU);
597
+
598
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Mish ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
599
+
600
+ /// Applies mish over a given input.
601
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Mish to learn
602
+ /// about the exact behavior of this module.
603
+ class TORCH_API MishImpl : public torch::nn::Cloneable<MishImpl> {
604
+ public:
605
+ Tensor forward(const Tensor& input);
606
+
607
+ void reset() override;
608
+
609
+ /// Pretty prints the `Mish` module into the given `stream`.
610
+ void pretty_print(std::ostream& stream) const override;
611
+ };
612
+
613
+ /// A `ModuleHolder` subclass for `MishImpl`.
614
+ /// See the documentation for `MishImpl` class to learn what methods it
615
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
616
+ /// module storage semantics.
617
+ TORCH_MODULE(Mish);
618
+
619
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sigmoid ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
620
+
621
+ /// Applies sigmoid over a given input.
622
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Sigmoid to learn
623
+ /// about the exact behavior of this module.
624
+ class TORCH_API SigmoidImpl : public torch::nn::Cloneable<SigmoidImpl> {
625
+ public:
626
+ Tensor forward(const Tensor& input);
627
+
628
+ void reset() override;
629
+
630
+ /// Pretty prints the `Sigmoid` module into the given `stream`.
631
+ void pretty_print(std::ostream& stream) const override;
632
+ };
633
+
634
+ /// A `ModuleHolder` subclass for `SigmoidImpl`.
635
+ /// See the documentation for `SigmoidImpl` class to learn what methods it
636
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
637
+ /// module storage semantics.
638
+ TORCH_MODULE(Sigmoid);
639
+
640
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softplus ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
641
+
642
+ /// Applies softplus over a given input.
643
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softplus to learn
644
+ /// about the exact behavior of this module.
645
+ ///
646
+ /// See the documentation for `torch::nn::SoftplusOptions` class to learn what
647
+ /// constructor arguments are supported for this module.
648
+ ///
649
+ /// Example:
650
+ /// ```
651
+ /// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42));
652
+ /// ```
653
+ class TORCH_API SoftplusImpl : public torch::nn::Cloneable<SoftplusImpl> {
654
+ public:
655
+ explicit SoftplusImpl(const SoftplusOptions& options_ = {});
656
+
657
+ Tensor forward(const Tensor& input);
658
+
659
+ void reset() override;
660
+
661
+ /// Pretty prints the `Softplus` module into the given `stream`.
662
+ void pretty_print(std::ostream& stream) const override;
663
+
664
+ /// The options with which this `Module` was constructed.
665
+ SoftplusOptions options;
666
+ };
667
+
668
+ /// A `ModuleHolder` subclass for `SoftplusImpl`.
669
+ /// See the documentation for `SoftplusImpl` class to learn what methods it
670
+ /// provides, and examples of how to use `Softplus` with
671
+ /// `torch::nn::SoftplusOptions`. See the documentation for `ModuleHolder` to
672
+ /// learn about PyTorch's module storage semantics.
673
+ TORCH_MODULE(Softplus);
674
+
675
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
676
+
677
+ /// Applies the soft shrinkage function element-wise.
678
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softshrink to learn
679
+ /// about the exact behavior of this module.
680
+ ///
681
+ /// See the documentation for `torch::nn::SoftshrinkOptions` class to learn what
682
+ /// constructor arguments are supported for this module.
683
+ ///
684
+ /// Example:
685
+ /// ```
686
+ /// Softshrink model(SoftshrinkOptions(42.42));
687
+ /// ```
688
+ class TORCH_API SoftshrinkImpl : public torch::nn::Cloneable<SoftshrinkImpl> {
689
+ public:
690
+ explicit SoftshrinkImpl(const SoftshrinkOptions& options_ = {});
691
+
692
+ Tensor forward(const Tensor& input);
693
+
694
+ void reset() override;
695
+
696
+ /// Pretty prints the `Softshrink` module into the given `stream`.
697
+ void pretty_print(std::ostream& stream) const override;
698
+
699
+ /// The options with which this `Module` was constructed.
700
+ SoftshrinkOptions options;
701
+ };
702
+
703
+ /// A `ModuleHolder` subclass for `SoftshrinkImpl`.
704
+ /// See the documentation for `SoftshrinkImpl` class to learn what methods it
705
+ /// provides, and examples of how to use `Softshrink` with
706
+ /// `torch::nn::SoftshrinkOptions`. See the documentation for `ModuleHolder` to
707
+ /// learn about PyTorch's module storage semantics.
708
+ TORCH_MODULE(Softshrink);
709
+
710
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softsign ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
711
+
712
+ /// Applies Softsign over a given input.
713
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Softsign to learn
714
+ /// about the exact behavior of this module.
715
+ class TORCH_API SoftsignImpl : public torch::nn::Cloneable<SoftsignImpl> {
716
+ public:
717
+ Tensor forward(const Tensor& input);
718
+
719
+ void reset() override;
720
+
721
+ /// Pretty prints the `Softsign` module into the given `stream`.
722
+ void pretty_print(std::ostream& stream) const override;
723
+ };
724
+
725
+ /// A `ModuleHolder` subclass for `SoftsignImpl`.
726
+ /// See the documentation for `SoftsignImpl` class to learn what methods it
727
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
728
+ /// module storage semantics.
729
+ TORCH_MODULE(Softsign);
730
+
731
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tanh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
732
+
733
+ /// Applies Tanh over a given input.
734
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Tanh to learn
735
+ /// about the exact behavior of this module.
736
+ class TORCH_API TanhImpl : public torch::nn::Cloneable<TanhImpl> {
737
+ public:
738
+ Tensor forward(const Tensor& input);
739
+
740
+ void reset() override;
741
+
742
+ /// Pretty prints the `Tanh` module into the given `stream`.
743
+ void pretty_print(std::ostream& stream) const override;
744
+ };
745
+
746
+ /// A `ModuleHolder` subclass for `TanhImpl`.
747
+ /// See the documentation for `TanhImpl` class to learn what methods it
748
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
749
+ /// module storage semantics.
750
+ TORCH_MODULE(Tanh);
751
+
752
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tanhshrink ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
753
+
754
+ /// Applies Tanhshrink over a given input.
755
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Tanhshrink to learn
756
+ /// about the exact behavior of this module.
757
+ class TORCH_API TanhshrinkImpl : public torch::nn::Cloneable<TanhshrinkImpl> {
758
+ public:
759
+ Tensor forward(const Tensor& input);
760
+
761
+ void reset() override;
762
+
763
+ /// Pretty prints the `Tanhshrink` module into the given `stream`.
764
+ void pretty_print(std::ostream& stream) const override;
765
+ };
766
+
767
+ /// A `ModuleHolder` subclass for `TanhshrinkImpl`.
768
+ /// See the documentation for `TanhshrinkImpl` class to learn what methods it
769
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
770
+ /// module storage semantics.
771
+ TORCH_MODULE(Tanhshrink);
772
+
773
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Threshold ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
774
+
775
+ /// Applies the Threshold function element-wise.
776
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Threshold to learn
777
+ /// about the exact behavior of this module.
778
+ ///
779
+ /// See the documentation for `torch::nn::ThresholdOptions` class to learn what
780
+ /// constructor arguments are supported for this module.
781
+ ///
782
+ /// Example:
783
+ /// ```
784
+ /// Threshold model(ThresholdOptions(42.42, 24.24).inplace(true));
785
+ /// ```
786
+ class TORCH_API ThresholdImpl : public torch::nn::Cloneable<ThresholdImpl> {
787
+ public:
788
+ ThresholdImpl(double threshold, double value)
789
+ : ThresholdImpl(ThresholdOptions(threshold, value)) {}
790
+ explicit ThresholdImpl(const ThresholdOptions& options_);
791
+
792
+ Tensor forward(Tensor input);
793
+
794
+ void reset() override;
795
+
796
+ /// Pretty prints the `Threshold` module into the given `stream`.
797
+ void pretty_print(std::ostream& stream) const override;
798
+
799
+ /// The options with which this `Module` was constructed.
800
+ ThresholdOptions options;
801
+ };
802
+
803
+ /// A `ModuleHolder` subclass for `ThresholdImpl`.
804
+ /// See the documentation for `ThresholdImpl` class to learn what methods it
805
+ /// provides, and examples of how to use `Threshold` with
806
+ /// `torch::nn::ThresholdOptions`. See the documentation for `ModuleHolder` to
807
+ /// learn about PyTorch's module storage semantics.
808
+ TORCH_MODULE(Threshold);
809
+
810
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiheadAttention ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
811
+
812
+ /// Applies the MultiheadAttention function element-wise.
813
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.MultiheadAttention
814
+ /// to learn about the exact behavior of this module.
815
+ ///
816
+ /// See the documentation for `torch::nn::MultiheadAttentionOptions` class to
817
+ /// learn what constructor arguments are supported for this module.
818
+ ///
819
+ /// Example:
820
+ /// ```
821
+ /// MultiheadAttention model(MultiheadAttentionOptions(20, 10).bias(false));
822
+ /// ```
823
+ class TORCH_API MultiheadAttentionImpl
824
+ : public torch::nn::Cloneable<MultiheadAttentionImpl> {
825
+ public:
826
+ MultiheadAttentionImpl(int64_t embed_dim, int64_t num_heads)
827
+ : MultiheadAttentionImpl(
828
+ MultiheadAttentionOptions(embed_dim, num_heads)) {}
829
+ explicit MultiheadAttentionImpl(const MultiheadAttentionOptions& options_);
830
+
831
+ std::tuple<Tensor, Tensor> forward(
832
+ const Tensor& query,
833
+ const Tensor& key,
834
+ const Tensor& value,
835
+ const Tensor& key_padding_mask = {},
836
+ bool need_weights = true,
837
+ const Tensor& attn_mask = {},
838
+ bool average_attn_weights = true);
839
+
840
+ protected:
841
+ FORWARD_HAS_DEFAULT_ARGS(
842
+ {3, AnyValue(Tensor())},
843
+ {4, AnyValue(true)},
844
+ {5, AnyValue(Tensor())},
845
+ {6, AnyValue(true)})
846
+
847
+ public:
848
+ void reset() override;
849
+
850
+ void _reset_parameters();
851
+
852
+ /// The options with which this `Module` was constructed.
853
+ MultiheadAttentionOptions options;
854
+
855
+ bool _qkv_same_embed_dim;
856
+ Tensor in_proj_weight;
857
+ Tensor in_proj_bias;
858
+ Tensor bias_k;
859
+ Tensor bias_v;
860
+ Linear out_proj = nullptr;
861
+ Tensor q_proj_weight;
862
+ Tensor k_proj_weight;
863
+ Tensor v_proj_weight;
864
+ int64_t head_dim;
865
+ };
866
+
867
+ /// A `ModuleHolder` subclass for `MultiheadAttentionImpl`.
868
+ /// See the documentation for `MultiheadAttentionImpl` class to learn what
869
+ /// methods it provides, and examples of how to use `MultiheadAttention` with
870
+ /// `torch::nn::MultiheadAttentionOptions`. See the documentation for
871
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
872
+ TORCH_MODULE(MultiheadAttention);
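+
+ /// A minimal usage sketch (illustrative, not from the upstream header):
+ /// query/key/value are (seq_len, batch, embed_dim) tensors, and the second
+ /// element of the returned tuple holds the attention weights when
+ /// `need_weights` is true:
+ /// ```
+ /// MultiheadAttention mha(MultiheadAttentionOptions(/*embed_dim=*/8, /*num_heads=*/2));
+ /// auto q = torch::randn({5, 3, 8});
+ /// auto k = torch::randn({5, 3, 8});
+ /// auto v = torch::randn({5, 3, 8});
+ /// auto [attn_output, attn_weights] = mha(q, k, v);
+ /// ```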
873
+
874
+ } // namespace nn
875
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/adaptive.h ADDED
@@ -0,0 +1,109 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/activation.h>
5
+ #include <torch/nn/module.h>
6
+ #include <torch/nn/modules/container/modulelist.h>
7
+ #include <torch/nn/modules/container/sequential.h>
8
+ #include <torch/nn/modules/linear.h>
9
+ #include <torch/nn/options/adaptive.h>
10
+
11
+ namespace torch {
12
+ namespace nn {
13
+
14
+ /// The output of a single invocation of an AdaptiveLogSoftmaxWithLoss
15
+ /// module's `forward()` method.
16
+ struct TORCH_API ASMoutput {
17
+ ASMoutput(Tensor output_, double loss_);
18
+
19
+ /// Tensor containing computed target log probabilities for each example
20
+ Tensor output;
21
+
22
+ /// Scalar representing the computed negative log likelihood loss
23
+ double loss;
24
+ };
25
+
26
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveLogSoftmaxWithLoss
27
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
28
+
29
+ /// Efficient softmax approximation as described in
30
+ /// `Efficient softmax approximation for GPUs`_ by Edouard Grave, Armand Joulin,
31
+ /// Moustapha Cissé, David Grangier, and Hervé Jégou.
32
+ /// See
33
+ /// https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveLogSoftmaxWithLoss
34
+ /// to learn about the exact behavior of this module.
35
+ ///
36
+ /// See the documentation for `torch::nn::AdaptiveLogSoftmaxWithLossOptions`
37
+ /// class to learn what constructor arguments are supported for this module.
38
+ ///
39
+ /// Example:
40
+ /// ```
41
+ /// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10,
42
+ /// {4, 8}).div_value(2.).head_bias(true));
43
+ /// ```
44
+ class TORCH_API AdaptiveLogSoftmaxWithLossImpl
45
+ : public Cloneable<AdaptiveLogSoftmaxWithLossImpl> {
46
+ public:
47
+ AdaptiveLogSoftmaxWithLossImpl(
48
+ int64_t in_features,
49
+ int64_t n_classes,
50
+ std::vector<int64_t> cutoffs)
51
+ : AdaptiveLogSoftmaxWithLossImpl(AdaptiveLogSoftmaxWithLossOptions(
52
+ in_features,
53
+ n_classes,
54
+ cutoffs)) {}
55
+
56
+ explicit AdaptiveLogSoftmaxWithLossImpl(
57
+ AdaptiveLogSoftmaxWithLossOptions options_);
58
+
59
+ ASMoutput forward(const Tensor& input, const Tensor& target);
60
+
61
+ void reset() override;
62
+
63
+ void reset_parameters();
64
+
65
+ /// Pretty prints the `AdaptiveLogSoftmaxWithLoss` module into the given
66
+ /// `stream`.
67
+ void pretty_print(std::ostream& stream) const override;
68
+
69
+ /// Given the input tensor and the output of `head`, computes the log of the
+ /// full distribution.
71
+ Tensor _get_full_log_prob(const Tensor& input, const Tensor& head_output);
72
+
73
+ /// Computes log probabilities for all n_classes
74
+ Tensor log_prob(const Tensor& input);
75
+
76
+ /// This is equivalent to `log_prob(input).argmax(1)` but is more efficient in
77
+ /// some cases
78
+ Tensor predict(const Tensor& input);
79
+
80
+ /// The options with which this `Module` was constructed
81
+ AdaptiveLogSoftmaxWithLossOptions options;
82
+
83
+ /// Cutoffs used to assign targets to their buckets. It should be a
+ /// sequence of integers sorted in increasing order.
85
+ std::vector<int64_t> cutoffs;
86
+
87
+ int64_t shortlist_size;
88
+
89
+ /// Number of clusters
90
+ int64_t n_clusters;
91
+
92
+ /// Output size of head classifier
93
+ int64_t head_size;
94
+
95
+ Linear head = nullptr;
96
+
97
+ ModuleList tail;
98
+ };
99
+
100
+ /// A `ModuleHolder` subclass for `AdaptiveLogSoftmaxWithLossImpl`.
101
+ /// See the documentation for `AdaptiveLogSoftmaxWithLossImpl` class to learn
102
+ /// what methods it provides, and examples of how to use
103
+ /// `AdaptiveLogSoftmaxWithLoss` with
104
+ /// `torch::nn::AdaptiveLogSoftmaxWithLossOptions`. See the documentation for
105
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
106
+ TORCH_MODULE(AdaptiveLogSoftmaxWithLoss);
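+
+ /// A minimal usage sketch (illustrative, not from the upstream header):
+ /// `input` is (N, in_features) and `target` holds class indices in
+ /// [0, n_classes); the forward call returns an `ASMoutput`:
+ /// ```
+ /// AdaptiveLogSoftmaxWithLoss model(
+ ///     AdaptiveLogSoftmaxWithLossOptions(8, 10, {4, 8}));
+ /// auto out = model(torch::randn({3, 8}), torch::randint(0, 10, {3}));
+ /// auto log_probs = out.output;  // per-example target log-probabilities
+ /// auto nll = out.loss;          // scalar negative log-likelihood
+ /// ```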
107
+
108
+ } // namespace nn
109
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/batchnorm.h ADDED
@@ -0,0 +1,250 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/batchnorm.h>
5
+ #include <torch/nn/init.h>
6
+ #include <torch/nn/options/batchnorm.h>
7
+ #include <torch/nn/pimpl.h>
8
+ #include <torch/types.h>
9
+
10
+ #include <cstdint>
11
+
12
+ namespace torch {
13
+ namespace nn {
14
+
15
+ /// Base class for all (dimension-specialized) batchnorm and instancenorm
16
+ /// modules.
17
+ template <size_t D, typename Derived, typename DerivedOptions>
18
+ class NormImplBase : public torch::nn::Cloneable<Derived> {
19
+ protected:
20
+ virtual void _check_input_dim(const Tensor& input) = 0;
21
+
22
+ public:
23
+ NormImplBase(const DerivedOptions& options_) : options(options_) {
24
+ // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
25
+ reset();
26
+ }
27
+
28
+ void reset() override {
29
+ if (options.affine()) {
30
+ weight = this->register_parameter(
31
+ "weight", torch::empty({options.num_features()}));
32
+ bias = this->register_parameter(
33
+ "bias", torch::empty({options.num_features()}));
34
+ } else {
35
+ weight =
36
+ this->register_parameter("weight", Tensor(), /*requires_grad=*/false);
37
+ bias =
38
+ this->register_parameter("bias", Tensor(), /*requires_grad=*/false);
39
+ }
40
+ if (options.track_running_stats()) {
41
+ running_mean = this->register_buffer(
42
+ "running_mean", torch::zeros({options.num_features()}));
43
+ running_var = this->register_buffer(
44
+ "running_var", torch::ones({options.num_features()}));
45
+ num_batches_tracked = this->register_buffer(
46
+ "num_batches_tracked", torch::tensor(0, torch::dtype(torch::kLong)));
47
+ } else {
48
+ running_mean = this->register_buffer("running_mean", Tensor());
49
+ running_var = this->register_buffer("running_var", Tensor());
50
+ num_batches_tracked =
51
+ this->register_buffer("num_batches_tracked", Tensor());
52
+ }
53
+ reset_parameters();
54
+ }
55
+
56
+ void reset_running_stats() {
57
+ if (options.track_running_stats()) {
58
+ running_mean.zero_();
59
+ running_var.fill_(1);
60
+ num_batches_tracked.zero_();
61
+ }
62
+ }
63
+
64
+ void reset_parameters() {
65
+ reset_running_stats();
66
+ if (options.affine()) {
67
+ torch::nn::init::ones_(weight);
68
+ torch::nn::init::zeros_(bias);
69
+ }
70
+ }
71
+
72
+ /// The options with which this module was constructed.
73
+ DerivedOptions options;
74
+
75
+ /// The learned weight.
76
+ /// Only defined if the `affine` option was `true` upon construction.
77
+ Tensor weight;
78
+
79
+ /// The learned bias.
80
+ /// Only defined if the `affine` option was `true` upon construction.
81
+ Tensor bias;
82
+
83
+ /// The running mean.
84
+ /// Only defined if the `track_running_stats` option was `true` upon
85
+ /// construction.
86
+ Tensor running_mean;
87
+
88
+ /// The running variance.
89
+ /// Only defined if the `track_running_stats` option was `true` upon
90
+ /// construction.
91
+ Tensor running_var;
92
+
93
+ /// The number of the forward call.
94
+ /// Only defined if the `track_running_stats` option was `true` upon
95
+ /// construction.
96
+ Tensor num_batches_tracked;
97
+ };
98
+
99
+ /// Base class for all (dimension-specialized) batchnorm modules.
100
+ template <size_t D, typename Derived>
101
+ class BatchNormImplBase : public NormImplBase<D, Derived, BatchNormOptions> {
102
+ public:
103
+ using NormImplBase<D, Derived, BatchNormOptions>::NormImplBase;
104
+
105
+ Tensor forward(const Tensor& input) {
106
+ this->_check_input_dim(input);
107
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
108
+ double exponential_average_factor;
109
+ if (this->options.momentum() == c10::nullopt) {
110
+ exponential_average_factor = 0.0;
111
+ } else {
112
+ exponential_average_factor = this->options.momentum().value();
113
+ }
114
+
115
+ if (this->is_training() && this->options.track_running_stats()) {
116
+ if (this->num_batches_tracked.defined()) {
117
+ this->num_batches_tracked += 1;
118
+ if (this->options.momentum() ==
119
+ c10::nullopt) { // use cumulative moving average
120
+ exponential_average_factor =
121
+ 1.0 / this->num_batches_tracked.template item<double>();
122
+ } else { // use exponential moving average
123
+ exponential_average_factor = this->options.momentum().value();
124
+ }
125
+ }
126
+ }
127
+
128
+ return torch::nn::functional::detail::batch_norm(
129
+ input,
130
+ this->running_mean,
131
+ this->running_var,
132
+ this->weight,
133
+ this->bias,
134
+ this->is_training() || !this->options.track_running_stats(),
135
+ /*momentum=*/exponential_average_factor,
136
+ this->options.eps());
137
+ }
138
+
139
+ /// Pretty prints the `BatchNorm{1,2,3}d` module into the given `stream`.
140
+ void pretty_print(std::ostream& stream) const override {
141
+ stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d("
142
+ << this->options.num_features() << ", "
143
+ << "eps=" << this->options.eps() << ", "
144
+ << "momentum=";
145
+
146
+ if (this->options.momentum().has_value()) {
147
+ stream << this->options.momentum().value();
148
+ } else {
149
+ stream << "None";
150
+ }
151
+
152
+ stream << ", "
153
+ << "affine=" << this->options.affine() << ", "
154
+ << "track_running_stats=" << this->options.track_running_stats()
155
+ << ")";
156
+ }
157
+ };
158
+
159
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm1d
160
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
161
+
162
+ /// Applies the BatchNorm1d function.
163
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm1d to learn
164
+ /// about the exact behavior of this module.
165
+ ///
166
+ /// See the documentation for `torch::nn::BatchNorm1dOptions` class to learn
167
+ /// what constructor arguments are supported for this module.
168
+ ///
169
+ /// Example:
170
+ /// ```
171
+ /// BatchNorm1d
172
+ /// model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
173
+ /// ```
174
+ class TORCH_API BatchNorm1dImpl : public BatchNormImplBase<1, BatchNorm1dImpl> {
175
+ protected:
176
+ void _check_input_dim(const Tensor& input) override;
177
+
178
+ public:
179
+ using BatchNormImplBase<1, BatchNorm1dImpl>::BatchNormImplBase;
180
+ };
181
+
182
+ /// A `ModuleHolder` subclass for `BatchNorm1dImpl`.
183
+ /// See the documentation for `BatchNorm1dImpl` class to learn what methods it
184
+ /// provides, and examples of how to use `BatchNorm1d` with
185
+ /// `torch::nn::BatchNorm1dOptions`. See the documentation for `ModuleHolder` to
186
+ /// learn about PyTorch's module storage semantics.
187
+ TORCH_MODULE(BatchNorm1d);
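+
+ /// A minimal usage sketch (illustrative, not from the upstream header): in
+ /// training mode the running statistics are updated on every call; switch to
+ /// `eval()` to normalize with the accumulated running statistics instead:
+ /// ```
+ /// BatchNorm1d bn(BatchNorm1dOptions(4));
+ /// auto y_train = bn(torch::randn({8, 4}));  // updates running_mean / running_var
+ /// bn->eval();
+ /// auto y_eval = bn(torch::randn({8, 4}));   // uses the running statistics
+ /// ```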
188
+
189
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm2d
190
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
191
+
192
+ /// Applies the BatchNorm2d function.
193
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm2d to learn
194
+ /// about the exact behavior of this module.
195
+ ///
196
+ /// See the documentation for `torch::nn::BatchNorm2dOptions` class to learn
197
+ /// what constructor arguments are supported for this module.
198
+ ///
199
+ /// Example:
200
+ /// ```
201
+ /// BatchNorm2d
202
+ /// model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
203
+ /// ```
204
+ class TORCH_API BatchNorm2dImpl : public BatchNormImplBase<2, BatchNorm2dImpl> {
205
+ protected:
206
+ void _check_input_dim(const Tensor& input) override;
207
+
208
+ public:
209
+ using BatchNormImplBase<2, BatchNorm2dImpl>::BatchNormImplBase;
210
+ };
211
+
212
+ /// A `ModuleHolder` subclass for `BatchNorm2dImpl`.
213
+ /// See the documentation for `BatchNorm2dImpl` class to learn what methods it
214
+ /// provides, and examples of how to use `BatchNorm2d` with
215
+ /// `torch::nn::BatchNorm2dOptions`. See the documentation for `ModuleHolder` to
216
+ /// learn about PyTorch's module storage semantics.
217
+ TORCH_MODULE(BatchNorm2d);
218
+
219
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm3d
220
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
221
+
222
+ /// Applies the BatchNorm3d function.
223
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm3d to learn
224
+ /// about the exact behavior of this module.
225
+ ///
226
+ /// See the documentation for `torch::nn::BatchNorm3dOptions` class to learn
227
+ /// what constructor arguments are supported for this module.
228
+ ///
229
+ /// Example:
230
+ /// ```
231
+ /// BatchNorm3d
232
+ /// model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
233
+ /// ```
234
+ class TORCH_API BatchNorm3dImpl : public BatchNormImplBase<3, BatchNorm3dImpl> {
235
+ protected:
236
+ void _check_input_dim(const Tensor& input) override;
237
+
238
+ public:
239
+ using BatchNormImplBase<3, BatchNorm3dImpl>::BatchNormImplBase;
240
+ };
241
+
242
+ /// A `ModuleHolder` subclass for `BatchNorm3dImpl`.
243
+ /// See the documentation for `BatchNorm3dImpl` class to learn what methods it
244
+ /// provides, and examples of how to use `BatchNorm3d` with
245
+ /// `torch::nn::BatchNorm3dOptions`. See the documentation for `ModuleHolder` to
246
+ /// learn about PyTorch's module storage semantics.
247
+ TORCH_MODULE(BatchNorm3d);
248
+
249
+ } // namespace nn
250
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/common.h ADDED
@@ -0,0 +1,97 @@
1
+ #pragma once
2
+
3
+ /// This macro enables a module with default arguments in its forward method
4
+ /// to be used in a Sequential module.
5
+ ///
6
+ /// Example usage:
7
+ ///
8
+ /// Let's say we have a module declared like this:
9
+ /// ```
10
+ /// struct MImpl : torch::nn::Module {
11
+ /// public:
12
+ /// explicit MImpl(int value_) : value(value_) {}
13
+ /// torch::Tensor forward(int a, int b = 2, double c = 3.0) {
14
+ /// return torch::tensor(a + b + c);
15
+ /// }
16
+ /// private:
17
+ /// int value;
18
+ /// };
19
+ /// TORCH_MODULE(M);
20
+ /// ```
21
+ ///
22
+ /// If we try to use it in a Sequential module and run forward:
23
+ /// ```
24
+ /// torch::nn::Sequential seq(M(1));
25
+ /// seq->forward(1);
26
+ /// ```
27
+ ///
28
+ /// We will receive the following error message:
29
+ /// ```
30
+ /// MImpl's forward() method expects 3 argument(s), but received 1.
31
+ /// If MImpl's forward() method has default arguments, please make sure
32
+ /// the forward() method is declared with a corresponding
33
+ /// `FORWARD_HAS_DEFAULT_ARGS` macro.
34
+ /// ```
35
+ ///
36
+ /// The right way to fix this error is to use the `FORWARD_HAS_DEFAULT_ARGS`
37
+ /// macro when declaring the module:
38
+ /// ```
39
+ /// struct MImpl : torch::nn::Module {
40
+ /// public:
41
+ /// explicit MImpl(int value_) : value(value_) {}
42
+ /// torch::Tensor forward(int a, int b = 2, double c = 3.0) {
43
+ /// return torch::tensor(a + b + c);
44
+ /// }
45
+ /// protected:
46
+ /// /*
47
+ /// NOTE: looking at the argument list of `forward`:
48
+ /// `forward(int a, int b = 2, double c = 3.0)`
49
+ /// we saw the following default arguments:
50
+ /// ----------------------------------------------------------------
51
+ /// 0-based index of default | Default value of arg
52
+ /// arg in forward arg list | (wrapped by `torch::nn::AnyValue()`)
53
+ /// ----------------------------------------------------------------
54
+ /// 1 | torch::nn::AnyValue(2)
55
+ /// 2 | torch::nn::AnyValue(3.0)
56
+ /// ----------------------------------------------------------------
57
+ /// Thus we pass the following arguments to the `FORWARD_HAS_DEFAULT_ARGS`
58
+ /// macro:
59
+ /// */
60
+ /// FORWARD_HAS_DEFAULT_ARGS({1, torch::nn::AnyValue(2)}, {2,
61
+ /// torch::nn::AnyValue(3.0)})
62
+ /// private:
63
+ /// int value;
64
+ /// };
65
+ /// TORCH_MODULE(M);
66
+ /// ```
67
+ /// Now, running the following would work:
68
+ /// ```
69
+ /// torch::nn::Sequential seq(M(1));
70
+ /// seq->forward(1); // This correctly populates the default arguments for
71
+ /// `MImpl::forward`
72
+ /// ```
73
+ #define FORWARD_HAS_DEFAULT_ARGS(...) \
74
+ template <typename ModuleType, typename... ArgumentTypes> \
75
+ friend struct torch::nn::AnyModuleHolder; \
76
+ bool _forward_has_default_args() override { \
77
+ return true; \
78
+ } \
79
+ unsigned int _forward_num_required_args() override { \
80
+ std::pair<unsigned int, torch::nn::AnyValue> args_info[] = {__VA_ARGS__}; \
81
+ return args_info[0].first; \
82
+ } \
83
+ std::vector<torch::nn::AnyValue> _forward_populate_default_args( \
84
+ std::vector<torch::nn::AnyValue>&& arguments) override { \
85
+ std::pair<unsigned int, torch::nn::AnyValue> args_info[] = {__VA_ARGS__}; \
86
+ unsigned int num_all_args = std::rbegin(args_info)->first + 1; \
87
+ TORCH_INTERNAL_ASSERT( \
88
+ arguments.size() >= _forward_num_required_args() && \
89
+ arguments.size() <= num_all_args); \
90
+ std::vector<torch::nn::AnyValue> ret = std::move(arguments); \
91
+ ret.reserve(num_all_args); \
92
+ for (auto& arg_info : args_info) { \
93
+ if (arg_info.first > ret.size() - 1) \
94
+ ret.emplace_back(std::move(arg_info.second)); \
95
+ } \
96
+ return ret; \
97
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any.h ADDED
@@ -0,0 +1,372 @@
1
+ #pragma once
2
+
3
+ #include <torch/detail/static.h>
4
+ #include <torch/nn/module.h>
5
+ #include <torch/nn/modules/container/any_module_holder.h>
6
+ #include <torch/nn/modules/container/any_value.h>
7
+ #include <torch/nn/pimpl.h>
8
+ #include <torch/types.h>
9
+
10
+ #include <torch/csrc/autograd/variable.h>
11
+ #include <torch/csrc/utils/variadic.h>
12
+
13
+ #include <ATen/Device.h>
14
+
15
+ #include <memory>
16
+ #include <type_traits>
17
+ #include <typeinfo>
18
+ #include <utility>
19
+ #include <vector>
20
+
21
+ namespace torch {
22
+ namespace nn {
23
+
24
+ /// Stores a type erased `Module`.
25
+ ///
26
+ /// The PyTorch C++ API does not impose an interface on the signature of
27
+ /// `forward()` in `Module` subclasses. This gives you complete freedom to
28
+ /// design your `forward()` methods to your liking. However, this also means
29
+ /// there is no unified base type you could store in order to call `forward()`
30
+ /// polymorphically for any module. This is where the `AnyModule` comes in.
31
+ /// Instead of inheritance, it relies on type erasure for polymorphism.
32
+ ///
33
+ /// An `AnyModule` can store any `nn::Module` subclass that provides a
34
+ /// `forward()` method. This `forward()` may accept any types and return any
35
+ /// type. Once stored in an `AnyModule`, you can invoke the underlying module's
36
+ /// `forward()` by calling `AnyModule::forward()` with the arguments you would
37
+ /// supply to the stored module (though see one important limitation below).
38
+ /// Example:
39
+ ///
40
+ /// \rst
41
+ /// .. code-block:: cpp
42
+ ///
43
+ /// struct GenericTrainer {
44
+ /// torch::nn::AnyModule module;
45
+ ///
46
+ /// void train(torch::Tensor input) {
47
+ /// module.forward(input);
48
+ /// }
49
+ /// };
50
+ ///
51
+ /// GenericTrainer trainer1{torch::nn::Linear(3, 4)};
52
+ /// GenericTrainer trainer2{torch::nn::Conv2d(3, 4, 2)};
53
+ /// \endrst
54
+ ///
55
+ /// As `AnyModule` erases the static type of the stored module (and its
56
+ /// `forward()` method) to achieve polymorphism, type checking of arguments is
57
+ /// moved to runtime. That is, passing an argument with an incorrect type to an
58
+ /// `AnyModule` will compile, but throw an exception at runtime:
59
+ ///
60
+ /// \rst
61
+ /// .. code-block:: cpp
62
+ ///
63
+ /// torch::nn::AnyModule module(torch::nn::Linear(3, 4));
64
+ /// // Linear takes a tensor as input, but we are passing an integer.
65
+ /// // This will compile, but throw a `torch::Error` exception at runtime.
66
+ /// module.forward(123);
67
+ /// \endrst
68
+ ///
69
+ /// \rst
70
+ /// .. attention::
71
+ /// One noteworthy limitation of `AnyModule` is that its `forward()` method
72
+ /// does not support implicit conversion of argument types. For example, if
73
+ /// the stored module's `forward()` method accepts a `float` and you call
74
+ /// `any_module.forward(3.4)` (where `3.4` is a `double`), this will throw
75
+ /// an exception.
76
+ /// \endrst
77
+ ///
78
+ /// The return type of the `AnyModule`'s `forward()` method is controlled via
79
+ /// the first template argument to `AnyModule::forward()`. It defaults to
80
+ /// `torch::Tensor`. To change it, you can write `any_module.forward<int>()`,
81
+ /// for example.
82
+ ///
83
+ /// \rst
84
+ /// .. code-block:: cpp
85
+ ///
86
+ /// torch::nn::AnyModule module(torch::nn::Linear(3, 4));
87
+ /// auto output = module.forward(torch::ones({2, 3}));
88
+ ///
89
+ /// struct IntModule {
90
+ /// int forward(int x) { return x; }
91
+ /// };
92
+ /// torch::nn::AnyModule module(IntModule{});
93
+ /// int output = module.forward<int>(5);
94
+ /// \endrst
95
+ ///
96
+ /// The only other method an `AnyModule` provides access to on the stored
97
+ /// module is `clone()`. However, you may acquire a handle on the module via
98
+ /// `.ptr()`, which returns a `shared_ptr<nn::Module>`. Further, if you know
99
+ /// the concrete type of the stored module, you can get a concrete handle to it
100
+ /// using `.get<T>()` where `T` is the concrete module type.
101
+ ///
102
+ /// \rst
103
+ /// .. code-block:: cpp
104
+ ///
105
+ /// torch::nn::AnyModule module(torch::nn::Linear(3, 4));
106
+ /// std::shared_ptr<nn::Module> ptr = module.ptr();
107
+ /// torch::nn::Linear linear(module.get<torch::nn::Linear>());
108
+ /// \endrst
109
+ class AnyModule {
110
+ public:
111
+ /// A default-constructed `AnyModule` is in an empty state.
112
+ AnyModule() = default;
113
+
114
+ /// Constructs an `AnyModule` from a `shared_ptr` to concrete module object.
115
+ template <typename ModuleType>
116
+ explicit AnyModule(std::shared_ptr<ModuleType> module);
117
+
118
+ /// Constructs an `AnyModule` from a concrete module object.
119
+ template <
120
+ typename ModuleType,
121
+ typename = torch::detail::enable_if_module_t<ModuleType>>
122
+ explicit AnyModule(ModuleType&& module);
123
+
124
+ /// Constructs an `AnyModule` from a module holder.
125
+ template <typename ModuleType>
126
+ explicit AnyModule(const ModuleHolder<ModuleType>& module_holder);
127
+
128
+ /// Move construction and assignment is allowed, and follows the default
129
+ /// behavior of move for `std::unique_ptr`.
130
+ AnyModule(AnyModule&&) = default;
131
+ AnyModule& operator=(AnyModule&&) = default;
132
+
133
+ /// Creates a shallow copy of an `AnyModule`.
134
+ AnyModule(const AnyModule& other);
135
+ AnyModule& operator=(const AnyModule& other);
136
+
137
+ /// Creates a deep copy of an `AnyModule` if it contains a module, else an
138
+ /// empty `AnyModule` if it is empty.
139
+ AnyModule clone(optional<Device> device = nullopt) const;
140
+
141
+ /// Assigns a module to the `AnyModule` (to circumvent the explicit
142
+ /// constructor).
143
+ template <typename ModuleType>
144
+ AnyModule& operator=(std::shared_ptr<ModuleType> module);
145
+
146
+ /// Invokes `forward()` on the contained module with the given arguments, and
147
+ /// returns the return value as an `AnyValue`. Use this method when chaining
148
+ /// `AnyModule`s in a loop.
149
+ template <typename... ArgumentTypes>
150
+ AnyValue any_forward(ArgumentTypes&&... arguments);
151
+
152
+ /// Invokes `forward()` on the contained module with the given arguments, and
153
+ /// casts the returned `AnyValue` to the supplied `ReturnType` (which defaults
154
+ /// to `torch::Tensor`).
155
+ template <typename ReturnType = torch::Tensor, typename... ArgumentTypes>
156
+ ReturnType forward(ArgumentTypes&&... arguments);
157
+
158
+ /// Attempts to cast the underlying module to the given module type. Throws an
159
+ /// exception if the types do not match.
160
+ template <typename T, typename = torch::detail::enable_if_module_t<T>>
161
+ T& get();
162
+
163
+ /// Attempts to cast the underlying module to the given module type. Throws an
164
+ /// exception if the types do not match.
165
+ template <typename T, typename = torch::detail::enable_if_module_t<T>>
166
+ const T& get() const;
167
+
168
+ /// Returns the contained module in a `nn::ModuleHolder` subclass if possible
169
+ /// (i.e. if `T` has a constructor for the underlying module type).
170
+ template <typename T, typename ContainedType = typename T::ContainedType>
171
+ T get() const;
172
+
173
+ /// Returns a `std::shared_ptr` whose dynamic type is that of the underlying
174
+ /// module.
175
+ std::shared_ptr<Module> ptr() const;
176
+
177
+ /// Like `ptr()`, but casts the pointer to the given type.
178
+ template <typename T, typename = torch::detail::enable_if_module_t<T>>
179
+ std::shared_ptr<T> ptr() const;
180
+
181
+ /// Returns the `type_info` object of the contained value.
182
+ const std::type_info& type_info() const;
183
+
184
+ /// Returns true if the `AnyModule` does not contain a module.
185
+ bool is_empty() const noexcept;
186
+
187
+ private:
188
+ /// Creates a `unique_ptr<AnyModulePlaceholder>` pointing to an
189
+ /// `AnyModuleHolder` of the correct type. This method is used to deduce the
190
+ /// arguments of the module's `forward()` method.
191
+ template <
192
+ typename ModuleType,
193
+ typename Class,
194
+ typename ReturnType,
195
+ typename... ArgumentTypes>
196
+ std::unique_ptr<AnyModulePlaceholder> make_holder(
197
+ std::shared_ptr<ModuleType>&& module,
198
+ ReturnType (Class::*)(ArgumentTypes...));
199
+
200
+ /// Helper method invoked by const and non-const `get()`.
201
+ template <typename ModuleType, typename ReturnType, typename... ArgumentTypes>
202
+ ModuleType& get_(ReturnType (ModuleType::*)(ArgumentTypes...)) const;
203
+
204
+ /// Helper method invoked by const and non-const `get()`.
205
+ template <typename ModuleType>
206
+ ModuleType& get_() const;
207
+
208
+ /// The type erased module.
209
+ std::unique_ptr<AnyModulePlaceholder> content_;
210
+ };
211
+
212
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModule ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
213
+
214
+ template <typename ModuleType>
215
+ AnyModule::AnyModule(std::shared_ptr<ModuleType> module)
216
+ : content_(make_holder(
217
+ std::move(module),
218
+ &std::remove_reference<ModuleType>::type::forward)) {
219
+ // `AnyModule` can only store an `nn::Module` subclass object that provides
220
+ // a `forward()` method that has a non-templatized return type.
221
+ // (e.g. `AnyModule` cannot store `nn::Sequential`, because `nn::Sequential`'s
222
+ // `forward()` method has a templatized return type.)
223
+ static_assert(
224
+ torch::detail::is_module<ModuleType>::value,
225
+ "Can only store object derived from nn::Module into AnyModule");
226
+ static_assert(
227
+ torch::detail::has_forward<ModuleType>::value,
228
+ "Can only store module with a forward() method that has a non-templatized"
229
+ " argument type and return type into AnyModule (e.g. we cannot store nn::Sequential"
230
+ "into AnyModule, because its forward() method's argument type and return type are templatized."
231
+ " If you need to use nn::Sequentials inside each other you can subclass "
232
+ "nn::Sequential and write a non-templatized forward function for it. You can checkout "
233
+ "https://github.com/pytorch/vision/blob/2f46070f3cb1ea894d82578f3dc5677f82f34958/torchvision/csrc/models/mnasnet.cpp#L59 "
234
+ "for an example on how to do this.).");
235
+ }
236
+
237
+ template <typename ModuleType, typename>
238
+ AnyModule::AnyModule(ModuleType&& module)
239
+ : AnyModule(
240
+ std::make_shared<ModuleType>(std::forward<ModuleType>(module))) {}
241
+
242
+ template <typename ModuleType>
243
+ AnyModule::AnyModule(const ModuleHolder<ModuleType>& module_holder)
244
+ : AnyModule(module_holder.ptr()) {}
245
+
246
+ inline AnyModule::AnyModule(const AnyModule& other)
247
+ : content_(other.content_ ? other.content_->copy() : nullptr) {}
248
+
249
+ inline AnyModule& AnyModule::operator=(const AnyModule& other) {
250
+ if (this != &other) {
251
+ content_ = other.content_ ? other.content_->copy() : nullptr;
252
+ }
253
+ return *this;
254
+ }
255
+
256
+ inline AnyModule AnyModule::clone(optional<Device> device) const {
257
+ AnyModule clone;
258
+ clone.content_ = content_ ? content_->clone_module(device) : nullptr;
259
+ return clone;
260
+ }
261
+
262
+ template <typename ModuleType>
263
+ AnyModule& AnyModule::operator=(std::shared_ptr<ModuleType> module) {
264
+ // NOLINTNEXTLINE(cppcoreguidelines-c-copy-assignment-signature)
265
+ return (*this = AnyModule(std::move(module)));
266
+ }
267
+
268
+ template <typename... ArgumentTypes>
269
+ AnyValue AnyModule::any_forward(ArgumentTypes&&... arguments) {
270
+ TORCH_CHECK(!is_empty(), "Cannot call forward() on an empty AnyModule");
271
+ std::vector<AnyValue> values;
272
+ values.reserve(sizeof...(ArgumentTypes));
273
+ torch::apply(
274
+ [&values](AnyValue&& value) { values.push_back(std::move(value)); },
275
+ AnyValue(std::forward<ArgumentTypes>(arguments))...);
276
+ return content_->forward(std::move(values));
277
+ }
278
+
279
+ template <typename ReturnType, typename... ArgumentTypes>
280
+ ReturnType AnyModule::forward(ArgumentTypes&&... arguments) {
281
+ return any_forward(std::forward<ArgumentTypes>(arguments)...)
282
+ .template get<ReturnType>();
283
+ }
284
+
285
+ template <typename T, typename>
286
+ T& AnyModule::get() {
287
+ TORCH_CHECK(!is_empty(), "Cannot call get() on an empty AnyModule");
288
+ return get_<T>();
289
+ }
290
+
291
+ template <typename T, typename>
292
+ const T& AnyModule::get() const {
293
+ TORCH_CHECK(!is_empty(), "Cannot call get() on an empty AnyModule");
294
+ return get_<T>();
295
+ }
296
+
297
+ template <typename T, typename ContainedType>
298
+ T AnyModule::get() const {
299
+ return T(ptr<ContainedType>());
300
+ }
301
+
302
+ inline std::shared_ptr<Module> AnyModule::ptr() const {
303
+ TORCH_CHECK(!is_empty(), "Cannot call ptr() on an empty AnyModule");
304
+ return content_->ptr();
305
+ }
306
+
307
+ template <typename T, typename>
308
+ std::shared_ptr<T> AnyModule::ptr() const {
309
+ TORCH_CHECK(!is_empty(), "Cannot call ptr() on an empty AnyModule");
310
+ // Call get() but discard the value, just to do the type checking.
311
+ get_<T>();
312
+ return std::dynamic_pointer_cast<T>(ptr());
313
+ }
314
+
315
+ inline const std::type_info& AnyModule::type_info() const {
316
+ TORCH_CHECK(!is_empty(), "Cannot call type_info() on an empty AnyModule");
317
+ return content_->type_info;
318
+ }
319
+
320
+ inline bool AnyModule::is_empty() const noexcept {
321
+ return content_ == nullptr;
322
+ }
323
+
324
+ // Private Methods
325
+
326
+ template <
327
+ typename ModuleType,
328
+ typename Class,
329
+ typename ReturnType,
330
+ typename... ArgumentTypes>
331
+ std::unique_ptr<AnyModulePlaceholder> AnyModule::make_holder(
332
+ std::shared_ptr<ModuleType>&& module,
333
+ ReturnType (Class::*)(ArgumentTypes...)) {
334
+ static_assert(
335
+ torch::detail::check_not_lvalue_references<ArgumentTypes...>(),
336
+ "Modules stored inside AnyModule must not take references. "
337
+ "Use pointers instead.");
338
+ static_assert(
339
+ !std::is_void<ReturnType>::value,
340
+ "AnyModule cannot store modules that return void "
341
+ "(you can return a dummy value).");
342
+ return std::make_unique<
343
+ AnyModuleHolder<decay_t<ModuleType>, ArgumentTypes...>>(
344
+ std::move(module));
345
+ }
346
+
347
+ template <typename ModuleType>
348
+ ModuleType& AnyModule::get_() const {
349
+ using M = typename std::remove_reference<ModuleType>::type;
350
+ static_assert(
351
+ torch::detail::has_forward<M>::value,
352
+ "Can only call AnyModule::get<T> with a type T that has a forward method");
353
+ return get_(&M::forward);
354
+ }
355
+
356
+ template <typename ModuleType, typename ReturnType, typename... ArgumentTypes>
357
+ ModuleType& AnyModule::get_(
358
+ ReturnType (ModuleType::*)(ArgumentTypes...)) const {
359
+ if (typeid(ModuleType).hash_code() == type_info().hash_code()) {
360
+ return *static_cast<AnyModuleHolder<ModuleType, ArgumentTypes...>&>(
361
+ *content_)
362
+ .module;
363
+ }
364
+ AT_ERROR(
365
+ "Attempted to cast module of type ",
366
+ c10::demangle(type_info().name()),
367
+ " to type ",
368
+ c10::demangle(typeid(ModuleType).name()));
369
+ }
370
+
371
+ } // namespace nn
372
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h ADDED
@@ -0,0 +1,133 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/modules/container/any_value.h>
4
+
5
+ namespace torch {
6
+ namespace nn {
7
+
8
+ class Module;
9
+
10
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModulePlaceholder ~~~~~~~~~~~~~~~~~~~~~~~~~~
11
+
12
+ /// The static type of the object we store in the `AnyModule`, which erases
13
+ /// the actual type, but allows us to call `forward()` on the underlying
14
+ /// module.
15
+ struct AnyModulePlaceholder : public AnyValue::Placeholder {
16
+ using AnyValue::Placeholder::Placeholder;
17
+
18
+ /// The "erased" `forward()` method.
19
+ virtual AnyValue forward(std::vector<AnyValue>&& arguments) = 0;
20
+
21
+ /// Returns std::shared_ptr<Module> pointing to the erased module.
22
+ virtual std::shared_ptr<Module> ptr() = 0;
23
+
24
+ /// Returns an `AnyModulePlaceholder` with a shallow copy of this `AnyModule`.
25
+ virtual std::unique_ptr<AnyModulePlaceholder> copy() const = 0;
26
+
27
+ /// Returns an `AnyModulePlaceholder` with a deep copy of this `AnyModule`.
28
+ virtual std::unique_ptr<AnyModulePlaceholder> clone_module(
29
+ optional<Device> device) const = 0;
30
+ };
31
+
32
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModuleHolder ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
33
+
34
+ /// The dynamic type of the object stored in the `AnyModule`. It contains the
35
+ /// concrete instance to which all calls are forwarded. It is parameterized
36
+ /// over the concrete type of the module, and the types of the arguments the
37
+ /// module takes in its `forward()` method.
38
+ template <typename ModuleType, typename... ArgumentTypes>
39
+ struct AnyModuleHolder : public AnyModulePlaceholder {
40
+ /// \internal
41
+ struct CheckedGetter {
42
+ template <typename T>
43
+ decay_t<T>&& operator()(size_t index) {
44
+ AT_ASSERT(index < arguments_.size());
45
+ auto& value = arguments_[index];
46
+ if (auto* maybe_value = value.template try_get<decay_t<T>>()) {
47
+ return std::move(*maybe_value);
48
+ }
49
+ AT_ERROR(
50
+ "Expected argument #",
51
+ index,
52
+ " to be of type ",
53
+ c10::demangle(typeid(T).name()),
54
+ ", but received value of type ",
55
+ c10::demangle(value.type_info().name()));
56
+ }
57
+ std::vector<AnyValue>& arguments_;
58
+ };
59
+
60
+ /// \internal
61
+ struct InvokeForward {
62
+ template <typename... Ts>
63
+ AnyValue operator()(Ts&&... ts) {
64
+ return AnyValue(module_->forward(std::forward<Ts>(ts)...));
65
+ }
66
+ std::shared_ptr<ModuleType>& module_;
67
+ };
68
+
69
+ /// Constructs the `AnyModuleHolder` from a concrete module.
70
+ explicit AnyModuleHolder(std::shared_ptr<ModuleType>&& module_)
71
+ : AnyModulePlaceholder(typeid(ModuleType)), module(std::move(module_)) {}
72
+
73
+ /// Calls `forward()` on the underlying module, casting each `AnyValue` in the
74
+ /// argument vector to a concrete value.
75
+ AnyValue forward(std::vector<AnyValue>&& arguments) override {
76
+ if (module->_forward_has_default_args()) {
77
+ TORCH_CHECK(
78
+ arguments.size() >= module->_forward_num_required_args() &&
79
+ arguments.size() <= sizeof...(ArgumentTypes),
80
+ c10::demangle(type_info.name()),
81
+ "'s forward() method expects at least ",
82
+ module->_forward_num_required_args(),
83
+ " argument(s) and at most ",
84
+ sizeof...(ArgumentTypes),
85
+ " argument(s), but received ",
86
+ arguments.size(),
87
+ ".");
88
+ arguments = std::move(
89
+ module->_forward_populate_default_args(std::move(arguments)));
90
+ } else {
91
+ std::string use_default_args_macro_prompt = " If " +
92
+ c10::demangle(type_info.name()) +
93
+ "'s forward() method has default arguments, " +
94
+ "please make sure the forward() method is declared with a corresponding `FORWARD_HAS_DEFAULT_ARGS` macro.";
95
+ TORCH_CHECK(
96
+ arguments.size() == sizeof...(ArgumentTypes),
97
+ c10::demangle(type_info.name()),
98
+ "'s forward() method expects ",
99
+ sizeof...(ArgumentTypes),
100
+ " argument(s), but received ",
101
+ arguments.size(),
102
+ ".",
103
+ (arguments.size() < sizeof...(ArgumentTypes))
104
+ ? use_default_args_macro_prompt
105
+ : "");
106
+ }
107
+
108
+ // FYI: During invocation of a module's `forward()` method, the values live
109
+ // in the `arguments` vector inside this function.
110
+ return torch::unpack<AnyValue, ArgumentTypes...>(
111
+ InvokeForward{module}, CheckedGetter{arguments});
112
+ }
113
+
114
+ std::shared_ptr<Module> ptr() override {
115
+ return module;
116
+ }
117
+
118
+ std::unique_ptr<AnyModulePlaceholder> copy() const override {
119
+ return std::make_unique<AnyModuleHolder>(*this);
120
+ }
121
+
122
+ std::unique_ptr<AnyModulePlaceholder> clone_module(
123
+ optional<Device> device) const override {
124
+ return std::make_unique<AnyModuleHolder>(
125
+ std::dynamic_pointer_cast<ModuleType>(module->clone(device)));
126
+ }
127
+
128
+ /// The actual concrete module instance.
129
+ std::shared_ptr<ModuleType> module;
130
+ };
131
+
132
+ } // namespace nn
133
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_value.h ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <torch/detail/static.h>
4
+ #include <torch/nn/module.h>
5
+ #include <torch/nn/pimpl.h>
6
+ #include <torch/types.h>
7
+
8
+ #include <torch/csrc/autograd/variable.h>
9
+ #include <torch/csrc/utils/variadic.h>
10
+
11
+ #include <memory>
12
+ #include <type_traits>
13
+ #include <typeinfo>
14
+ #include <utility>
15
+
16
+ namespace torch {
17
+ namespace nn {
18
+
19
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyValue ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20
+
21
+ /// An implementation of `std::any` which stores
22
+ /// a type erased object, whose concrete value can be retrieved at runtime by
23
+ /// checking if the `typeid()` of a requested type matches the `typeid()` of
24
+ /// the object stored.
25
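+ ///
+ /// For illustration, a minimal sketch (the stored `int` below is an
+ /// assumption for the example, not part of this header):
+ ///
+ /// \rst
+ /// .. code-block:: cpp
+ ///
+ /// torch::nn::AnyValue value(5);
+ /// int* maybe_int = value.try_get<int>(); // non-null, *maybe_int == 5
+ /// double* maybe_double = value.try_get<double>(); // nullptr, type mismatch
+ /// int i = value.get<int>(); // returns 5; would throw for a wrong type
+ /// \endrst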
+ class AnyValue {
26
+ public:
27
+ /// Move construction and assignment is allowed, and follows the default
28
+ /// behavior of move for `std::unique_ptr`.
29
+ AnyValue(AnyValue&&) = default;
30
+ AnyValue& operator=(AnyValue&&) = default;
31
+
32
+ /// Copy construction and assignment is allowed.
33
+ AnyValue(const AnyValue& other) : content_(other.content_->clone()) {}
34
+ AnyValue& operator=(const AnyValue& other) {
35
+ content_ = other.content_->clone();
36
+ return *this;
37
+ }
38
+
39
+ /// Constructs the `AnyValue` from value type.
40
+ template <typename T>
41
+ // NOLINTNEXTLINE(bugprone-forwarding-reference-overload)
42
+ explicit AnyValue(T&& value)
43
+ : content_(std::make_unique<Holder<decay_t<T>>>(std::forward<T>(value))) {
44
+ }
45
+
46
+ /// Returns a pointer to the value contained in the `AnyValue` if the type
47
+ /// passed as template parameter matches the type of the value stored, and
48
+ /// returns a null pointer otherwise.
49
+ template <typename T>
50
+ T* try_get() {
51
+ static_assert(
52
+ !std::is_reference<T>::value,
53
+ "AnyValue stores decayed types, you cannot cast it to a reference type");
54
+ static_assert(
55
+ !std::is_array<T>::value,
56
+ "AnyValue stores decayed types, you must cast it to T* instead of T[]");
57
+ if (typeid(T).hash_code() == type_info().hash_code()) {
58
+ return &static_cast<Holder<T>&>(*content_).value;
59
+ }
60
+ return nullptr;
61
+ }
62
+
63
+ /// Returns the value contained in the `AnyValue` if the type passed as
64
+ /// template parameter matches the type of the value stored, and throws an
65
+ /// exception otherwise.
66
+ template <typename T>
67
+ T get() {
68
+ if (auto* maybe_value = try_get<T>()) {
69
+ return *maybe_value;
70
+ }
71
+ AT_ERROR(
72
+ "Attempted to cast AnyValue to ",
73
+ c10::demangle(typeid(T).name()),
74
+ ", but its actual type is ",
75
+ c10::demangle(type_info().name()));
76
+ }
77
+
78
+ /// Returns the `type_info` object of the contained value.
79
+ const std::type_info& type_info() const noexcept {
80
+ return content_->type_info;
81
+ }
82
+
83
+ private:
84
+ friend struct AnyModulePlaceholder;
85
+ friend struct TestAnyValue;
86
+
87
+ /// \internal
88
+ /// The static type of the object we store in the `AnyValue`, which erases the
89
+ /// actual object's type, allowing us only to check the `type_info` of the
90
+ /// type stored in the dynamic type.
91
+ struct Placeholder {
92
+ explicit Placeholder(const std::type_info& type_info_) noexcept
93
+ : type_info(type_info_) {}
94
+ Placeholder(const Placeholder&) = default;
95
+ Placeholder(Placeholder&&) = default;
96
+ virtual ~Placeholder() = default;
97
+ virtual std::unique_ptr<Placeholder> clone() const {
98
+ TORCH_CHECK(false, "clone() should only be called on `AnyValue::Holder`");
99
+ }
100
+ const std::type_info& type_info;
101
+ };
102
+
103
+ /// \internal
104
+ /// The dynamic type of the object we store in the `AnyValue`, which hides the
105
+ /// actual object we have erased in this `AnyValue`.
106
+ template <typename T>
107
+ struct Holder : public Placeholder {
108
+ /// A template because T&& would not be universal reference here.
109
+ template <typename U>
110
+ // NOLINTNEXTLINE(bugprone-forwarding-reference-overload)
111
+ explicit Holder(U&& value_) noexcept
112
+ : Placeholder(typeid(T)), value(std::forward<U>(value_)) {}
113
+ std::unique_ptr<Placeholder> clone() const override {
114
+ return std::make_unique<Holder<T>>(value);
115
+ }
116
+ T value;
117
+ };
118
+
119
+ /// The type erased object.
120
+ std::unique_ptr<Placeholder> content_;
121
+ };
122
+
123
+ } // namespace nn
124
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/functional.h ADDED
@@ -0,0 +1,105 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/utils/variadic.h>
5
+ #include <torch/nn/cloneable.h>
6
+ #include <torch/nn/pimpl.h>
7
+ #include <torch/types.h>
8
+
9
+ #include <functional>
10
+ #include <utility>
11
+
12
+ namespace torch {
13
+ namespace nn {
14
+
15
+ /// Wraps a function in a `Module`.
16
+ ///
17
+ /// The `Functional` module allows wrapping an arbitrary function or function
18
+ /// object in an `nn::Module`. This is primarily handy for usage in
19
+ /// `Sequential`.
20
+ ///
21
+ /// \rst
22
+ /// .. code-block:: cpp
23
+ ///
24
+ /// Sequential sequential(
25
+ /// Linear(3, 4),
26
+ /// Functional(torch::relu),
27
+ /// BatchNorm1d(3),
28
+ /// Functional(torch::elu, /*alpha=*/1));
29
+ /// \endrst
30
+ ///
31
+ /// While a `Functional` module only accepts a single `Tensor` as input, it is
32
+ /// possible for the wrapped function to accept further arguments. However,
33
+ /// these have to be bound *at construction time*. For example, if
34
+ /// you want to wrap `torch::leaky_relu`, which accepts a `slope` scalar as its
35
+ /// second argument, with a particular value for its `slope` in a `Functional`
36
+ /// module, you could write
37
+ ///
38
+ /// \rst
39
+ /// .. code-block:: cpp
40
+ ///
41
+ /// Functional(torch::leaky_relu, /*slope=*/0.5)
42
+ /// \endrst
43
+ ///
44
+ /// The value of `0.5` is then stored within the `Functional` object and
45
+ /// supplied to the function call at invocation time. Note that such bound
46
+ /// values are evaluated eagerly and stored a single time. See the documentation
47
+ /// of [std::bind](https://en.cppreference.com/w/cpp/utility/functional/bind)
48
+ /// for more information on the semantics of argument binding.
49
+ ///
50
+ /// \rst
51
+ /// .. attention::
52
+ /// After passing any bound arguments, the function must accept a single
53
+ /// tensor and return a single tensor.
54
+ /// \endrst
55
+ ///
56
+ /// Note that `Functional` overloads the call operator (`operator()`) such that
57
+ /// you can invoke it with `my_func(...)`.
58
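+ ///
+ /// For illustration, a minimal sketch of constructing and calling a
+ /// `Functional` directly (the input shape below is an assumption for the
+ /// example, not part of this header):
+ ///
+ /// \rst
+ /// .. code-block:: cpp
+ ///
+ /// torch::nn::Functional leaky(torch::leaky_relu, /*slope=*/0.5);
+ /// torch::Tensor y = leaky(torch::ones({2, 3}));
+ /// \endrst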
+ class TORCH_API FunctionalImpl : public torch::nn::Cloneable<FunctionalImpl> {
59
+ public:
60
+ using Function = std::function<Tensor(Tensor)>;
61
+
62
+ /// Constructs a `Functional` from a function object.
63
+ explicit FunctionalImpl(Function function);
64
+
65
+ template <
66
+ typename SomeFunction,
67
+ typename... Args,
68
+ typename = torch::enable_if_t<(sizeof...(Args) > 0)>>
69
+ explicit FunctionalImpl(SomeFunction original_function, Args&&... args)
70
+ // NOLINTNEXTLINE(modernize-avoid-bind)
71
+ : function_(std::bind(
72
+ original_function,
73
+ /*input=*/std::placeholders::_1,
74
+ std::forward<Args>(args)...)) {
75
+ // std::bind is normally evil, but (1) gcc is broken w.r.t. handling
76
+ // parameter pack expansion in lambdas and (2) moving parameter packs into
77
+ // a lambda only works with C++14, so std::bind is the more move-aware
78
+ // solution here.
79
+ }
80
+
81
+ void reset() override;
82
+
83
+ /// Pretty prints the `Functional` module into the given `stream`.
84
+ void pretty_print(std::ostream& stream) const override;
85
+
86
+ /// Forwards the `input` tensor to the underlying (bound) function object.
87
+ Tensor forward(Tensor input);
88
+
89
+ /// Calls forward(input).
90
+ Tensor operator()(Tensor input);
91
+
92
+ bool is_serializable() const override;
93
+
94
+ private:
95
+ Function function_;
96
+ };
97
+
98
+ /// A `ModuleHolder` subclass for `FunctionalImpl`.
99
+ /// See the documentation for `FunctionalImpl` class to learn what methods it
100
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
101
+ /// module storage semantics.
102
+ TORCH_MODULE(Functional);
103
+
104
+ } // namespace nn
105
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/moduledict.h ADDED
@@ -0,0 +1,262 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/module.h>
5
+ #include <torch/ordered_dict.h>
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ /// An OrderedDict of `Module`s that registers its elements by their `key`s.
12
+ ///
13
+ /// \rst
14
+ /// .. code-block:: cpp
15
+ ///
16
+ /// torch::OrderedDict<std::string, std::shared_ptr<Module>> ordereddict = {
17
+ /// {"linear", Linear(10, 3).ptr()},
18
+ /// {"conv", Conv2d(1, 2, 3).ptr()},
19
+ /// {"dropout", Dropout(0.5).ptr()},
20
+ /// };
21
+ /// torch::nn::ModuleDict dict1(ordereddict);
22
+ ///
23
+ /// for (const auto &module : *dict1) {
24
+ /// module->pretty_print(std::cout);
25
+ /// }
26
+ ///
27
+ /// std::vector<std::pair<std::string, std::shared_ptr<Module>>> list = {
28
+ /// {"linear", Linear(10, 3).ptr()},
29
+ /// {"conv", Conv2d(1, 2, 3).ptr()},
30
+ /// {"dropout", Dropout(0.5).ptr()},
31
+ /// };
32
+ /// torch::nn::ModuleDict dict2(list);
33
+ ///
34
+ /// for (const auto &module : *dict2) {
35
+ /// module->pretty_print(std::cout);
36
+ /// }
37
+ ///
38
+ /// \endrst
39
+ ///
40
+ /// Why should you use `ModuleDict` instead of a simple `map` or `OrderedDict`?
41
+ /// The value a `ModuleDict` provides over manually managing an ordered map of
42
+ /// modules is that it allows treating the whole container *as a single module*,
43
+ /// such that performing a transformation on the `ModuleDict` applies to each of
44
+ /// the modules it stores (which are each a registered submodule of the
45
+ /// `ModuleDict`). For example, calling `.to(torch::kCUDA)` on a `ModuleDict`
46
+ /// will move each module in the map to CUDA memory. For example:
47
+ ///
48
+ /// \rst
49
+ /// .. code-block:: cpp
50
+ ///
51
+ /// torch::OrderedDict<std::string, std::shared_ptr<Module>> ordereddict = {
52
+ /// {"linear", Linear(10, 3).ptr()},
53
+ /// {"conv", Conv2d(1, 2, 3).ptr()},
54
+ /// {"dropout", Dropout(0.5).ptr()},
55
+ /// };
56
+ /// torch::nn::ModuleDict dict(ordereddict);
57
+ ///
58
+ /// // Convert all modules to CUDA.
59
+ /// dict->to(torch::kCUDA);
60
+ ///
61
+ /// \endrst
62
+ ///
63
+ /// Finally, `ModuleDict` provides a lightweight container API, such as allowing
64
+ /// iteration over submodules, positional access, adding new modules from a
65
+ /// vector of key-module pairs or an `OrderedDict` or another `ModuleDict` after
66
+ /// construction via `update`.
67
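+ ///
+ /// For illustration, a sketch of that container API, reusing `ordereddict` and
+ /// `list` from the examples above:
+ ///
+ /// \rst
+ /// .. code-block:: cpp
+ ///
+ /// torch::nn::ModuleDict dict(ordereddict);
+ ///
+ /// dict->update(list); // insert or overwrite by key
+ /// bool has_conv = dict->contains("conv"); // true
+ /// std::shared_ptr<Module> removed = dict->pop("dropout");
+ /// size_t n = dict->size(); // 2
+ /// \endrst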
+ class ModuleDictImpl : public Cloneable<ModuleDictImpl> {
68
+ public:
69
+ using Iterator =
70
+ torch::OrderedDict<std::string, std::shared_ptr<Module>>::Iterator;
71
+ using ConstIterator =
72
+ torch::OrderedDict<std::string, std::shared_ptr<Module>>::ConstIterator;
73
+
74
+ ModuleDictImpl() = default;
75
+
76
+ /// Constructs the `ModuleDict` from a list of string-Module pairs.
77
+ explicit ModuleDictImpl(
78
+ const std::vector<std::pair<std::string, std::shared_ptr<Module>>>&
79
+ modules) {
80
+ update(modules);
81
+ }
82
+
83
+ /// Constructs the `ModuleDict` from an `OrderedDict`.
84
+ explicit ModuleDictImpl(
85
+ const torch::OrderedDict<std::string, std::shared_ptr<Module>>& modules) {
86
+ update(modules);
87
+ }
88
+
89
+ /// Return the items in the `ModuleDict`.
90
+ std::vector<std::pair<std::string, std::shared_ptr<Module>>> items() const {
91
+ return modules_.pairs();
92
+ }
93
+
94
+ /// Return the keys in the `ModuleDict`.
95
+ std::vector<std::string> keys() const {
96
+ return modules_.keys();
97
+ }
98
+
99
+ /// Return the values in the `ModuleDict`.
100
+ std::vector<std::shared_ptr<Module>> values() const {
101
+ return modules_.values();
102
+ }
103
+
104
+ /// Return an iterator to the start of `ModuleDict`.
105
+ Iterator begin() {
106
+ return modules_.begin();
107
+ }
108
+
109
+ /// Return a const iterator to the start of `ModuleDict`.
110
+ ConstIterator begin() const {
111
+ return modules_.begin();
112
+ }
113
+
114
+ /// Return an iterator to the end of `ModuleDict`.
115
+ Iterator end() {
116
+ return modules_.end();
117
+ }
118
+
119
+ /// Return a const iterator to the end of `ModuleDict`.
120
+ ConstIterator end() const {
121
+ return modules_.end();
122
+ }
123
+
124
+ /// Return the number of items currently stored in the `ModuleDict`.
125
+ size_t size() const noexcept {
126
+ return modules_.size();
127
+ }
128
+
129
+ /// Return true if the `ModuleDict` is empty, otherwise return false.
130
+ bool empty() const noexcept {
131
+ return modules_.is_empty();
132
+ }
133
+
134
+ /// Checks whether a module with the given `key` is stored in the `ModuleDict`.
135
+ bool contains(const std::string& key) const noexcept {
136
+ return modules_.contains(key);
137
+ }
138
+
139
+ /// Remove all items from the `ModuleDict`.
140
+ void clear() {
141
+ // The registration of the modules is intentionally kept, to stay consistent
142
+ // with the Python version.
143
+ modules_.clear();
144
+ }
145
+
146
+ /// Special cloning function for `ModuleDict` because it does not use
147
+ /// `reset()`.
148
+ std::shared_ptr<Module> clone(
149
+ const optional<Device>& device = nullopt) const override {
150
+ auto clone = std::make_shared<ModuleDictImpl>();
151
+ for (const auto& module : modules_) {
152
+ clone->insert(module.key(), module.value()->clone(device));
153
+ }
154
+ return clone;
155
+ }
156
+
157
+ /// `reset()` is empty for `ModuleDict`, since it does not have parameters of
158
+ /// its own.
159
+ void reset() override {}
160
+
161
+ /// Pretty prints the `ModuleDict` into the given `stream`.
162
+ void pretty_print(std::ostream& stream) const override {
163
+ stream << "torch::nn::ModuleDict";
164
+ }
165
+
166
+ /// Attempts to return the `Module` associated with the given `key`. Throws
167
+ /// an exception if no such `key` is stored in the `ModuleDict`. Check
168
+ /// `contains(key)` first for a non-throwing way of access.
169
+ std::shared_ptr<Module> operator[](const std::string& key) const {
170
+ return modules_[key];
171
+ }
172
+
173
+ /// Attempts to return the module at the given key as the requested type.
174
+ /// Throws an exception if no such `key` is stored in the `ModuleDict`.
175
+ /// Check `contains(key)` first for a non-throwing way of access.
176
+ template <typename T>
177
+ T& at(const std::string& key) {
178
+ static_assert(
179
+ torch::detail::is_module<T>::value,
180
+ "Can only call ModuleList::at with an nn::Module type");
181
+ auto module = modules_[key]->as<T>();
182
+ TORCH_CHECK(
183
+ module,
184
+ "Unable to cast module[",
185
+ key,
186
+ "] to ",
187
+ c10::demangle(typeid(T).name()));
188
+ return *module;
189
+ }
190
+
191
+ /// Attempts to return the module at the given key as the requested type.
192
+ /// Throws an exception if no such `key` is stored in the `ModuleDict`.
193
+ /// Check `contains(key)` first for a non-throwing way of access.
194
+ template <typename T>
195
+ const T& at(const std::string& key) const {
196
+ static_assert(
197
+ torch::detail::is_module<T>::value,
198
+ "Can only call ModuleList::at with an nn::Module type");
199
+ const auto module = modules_[key]->as<T>();
200
+ TORCH_CHECK(
201
+ module,
202
+ "Unable to cast module[",
203
+ key,
204
+ "] to ",
205
+ c10::demangle(typeid(T).name()));
206
+ return *module;
207
+ }
208
+
209
+ /// Removes and returns the `Module` associated with the given `key`.
210
+ /// Throws an exception if no such `key` is stored in the `ModuleDict`.
211
+ /// Check `contains(key)` first for a non-throwing way of access.
212
+ std::shared_ptr<Module> pop(const std::string& key) {
213
+ auto module = modules_[key];
214
+ modules_.erase(key);
215
+ // The registration of the module is intentionally kept, to stay consistent
216
+ // with the Python version.
217
+ return module;
218
+ }
219
+
220
+ /// Updates the `ModuleDict` with a vector of key-module pairs.
221
+ void update(
222
+ const std::vector<std::pair<std::string, std::shared_ptr<Module>>>&
223
+ modules) {
224
+ for (auto& item : modules) {
225
+ insert(item.first, item.second);
226
+ }
227
+ }
228
+
229
+ /// Updates the `ModuleDict` with key-value pairs from an `OrderedDict` or another
230
+ /// `ModuleDict`.
231
+ template <typename Container>
232
+ void update(const Container& container) {
233
+ for (auto& item : container) {
234
+ insert(item.key(), item.value());
235
+ }
236
+ }
237
+
238
+ private:
239
+ /// Private `OrderedDict` holding the key-Module pairs.
240
+ torch::OrderedDict<std::string, std::shared_ptr<Module>> modules_;
241
+
242
+ /// Insert a key-module pair by overwriting existing keys,
243
+ /// and register or replace the `Module`.
244
+ void insert(const std::string& key, std::shared_ptr<Module> module) {
245
+ if (contains(key)) {
246
+ modules_[key] = std::move(module);
247
+ replace_module(key, modules_[key]);
248
+ } else {
249
+ modules_.insert(key, std::move(module));
250
+ register_module(key, modules_.back().value());
251
+ }
252
+ }
253
+ };
254
+
255
+ /// A `ModuleHolder` subclass for `ModuleDictImpl`.
256
+ /// See the documentation for `ModuleDictImpl` class to learn what methods it
257
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
258
+ /// module storage semantics.
259
+ TORCH_MODULE(ModuleDict);
260
+
261
+ } // namespace nn
262
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/modulelist.h ADDED
@@ -0,0 +1,274 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/nn/cloneable.h>
5
+ #include <torch/nn/module.h>
6
+
7
+ #include <utility>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ namespace nn {
12
+
13
+ /// A list of `Module`s that registers its elements.
14
+ ///
15
+ /// \rst
16
+ /// .. code-block:: cpp
17
+ ///
18
+ /// torch::nn::ModuleList mlist(
19
+ /// torch::nn::Linear(3, 4),
20
+ /// torch::nn::BatchNorm1d(4),
21
+ /// torch::nn::Dropout(0.5)
22
+ /// );
23
+ ///
24
+ /// for (const auto &module : *mlist) {
25
+ /// module->pretty_print(std::cout);
26
+ /// }
27
+ ///
28
+ /// \endrst
29
+ ///
30
+ /// Why should you use `ModuleList` instead of a simple `std::vector`? The value
31
+ /// a `ModuleList` provides over manually calling a sequence of modules is that
32
+ /// it allows treating the whole container *as a single module*, such that
33
+ /// performing a transformation on the `ModuleList` applies to each of the
34
+ /// modules it stores (which are each a registered submodule of the
35
+ /// `ModuleList`). For example, calling
36
+ /// `.to(torch::kCUDA)` on a `ModuleList` will move each module in the list to
37
+ /// CUDA memory. For example:
38
+ ///
39
+ /// \rst
40
+ /// .. code-block:: cpp
41
+ ///
42
+ /// torch::nn::ModuleList mlist(
43
+ /// torch::nn::Linear(3, 4),
44
+ /// torch::nn::BatchNorm1d(4),
45
+ /// torch::nn::Dropout(0.5)
46
+ /// );
47
+ ///
48
+ /// // Convert all modules to CUDA.
49
+ /// mlist->to(torch::kCUDA);
50
+ ///
51
+ /// \endrst
52
+ ///
53
+ /// Finally, `ModuleList` provides a lightweight container API, such as allowing
54
+ /// iteration over submodules, positional access, adding a new module after
55
+ /// construction via `push_back`, as well as joining two `ModuleList`s via
56
+ /// `extend`.
57
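+ ///
+ /// For illustration, a sketch of `push_back` and `extend` (the concrete
+ /// modules below are assumptions for the example, not part of this header):
+ ///
+ /// \rst
+ /// .. code-block:: cpp
+ ///
+ /// torch::nn::ModuleList a(torch::nn::Linear(3, 4));
+ /// a->push_back(torch::nn::ReLU());
+ ///
+ /// torch::nn::ModuleList b(torch::nn::Dropout(0.5));
+ /// a->extend(*b); // `a` now holds Linear, ReLU, Dropout
+ /// \endrst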
+ class ModuleListImpl : public Cloneable<ModuleListImpl> {
58
+ public:
59
+ using Iterator = std::vector<std::shared_ptr<Module>>::iterator;
60
+ using ConstIterator = std::vector<std::shared_ptr<Module>>::const_iterator;
61
+
62
+ ModuleListImpl() = default;
63
+
64
+ /// Constructs the `ModuleList` from a variadic list of modules.
65
+ template <typename... Modules>
66
+ explicit ModuleListImpl(Modules&&... modules) {
67
+ modules_.reserve(sizeof...(Modules));
68
+ push_back_var(std::forward<Modules>(modules)...);
69
+ }
70
+
71
+ /// Special cloning function for `ModuleList` because it does not use
72
+ /// `reset()`.
73
+ std::shared_ptr<Module> clone(
74
+ const optional<Device>& device = nullopt) const override {
75
+ auto clone = std::make_shared<ModuleListImpl>();
76
+ for (const auto& module : modules_) {
77
+ clone->push_back(module->clone(device));
78
+ }
79
+ return clone;
80
+ }
81
+
82
+ /// `reset()` is empty for `ModuleList`, since it does not have parameters of
83
+ /// its own.
84
+ void reset() override {}
85
+
86
+ /// Pretty prints the `ModuleList` module into the given `stream`.
87
+ void pretty_print(std::ostream& stream) const override {
88
+ stream << "torch::nn::ModuleList";
89
+ }
90
+
91
+ void push_back(std::shared_ptr<Module> module) {
92
+ modules_.push_back(std::move(module));
93
+ const auto index = modules_.size() - 1;
94
+ register_module(c10::to_string(index), modules_[index]);
95
+ }
96
+
97
+ /// Adds a new `Module` to the `ModuleList` container, moving or copying
98
+ /// it into a `shared_ptr` internally. This method allows passing value types,
99
+ /// and letting the container deal with the boxing.
100
+ template <typename M, typename = torch::detail::enable_if_module_t<M>>
101
+ void push_back(M&& module) {
102
+ using Type = typename std::remove_reference<M>::type;
103
+ push_back(std::make_shared<Type>(std::forward<M>(module)));
104
+ }
105
+
106
+ /// Unwraps the contained module of a `ModuleHolder` and adds it to the
107
+ /// `ModuleList`.
108
+ template <typename M>
109
+ void push_back(const ModuleHolder<M>& module_holder) {
110
+ push_back(module_holder.ptr());
111
+ }
112
+
113
+ /// Iterates over the container and calls `push_back()` on each value.
114
+ template <typename Container>
115
+ void extend(const Container& container) {
116
+ for (const auto& module : container) {
117
+ push_back(module);
118
+ }
119
+ }
120
+
121
+ /// Returns an iterator to the start of the `ModuleList`.
122
+ Iterator begin() {
123
+ return modules_.begin();
124
+ }
125
+
126
+ /// Returns a const iterator to the start of the `ModuleList`.
127
+ ConstIterator begin() const {
128
+ return modules_.begin();
129
+ }
130
+
131
+ /// Returns an iterator to the end of the `ModuleList`.
132
+ Iterator end() {
133
+ return modules_.end();
134
+ }
135
+
136
+ /// Returns a const iterator to the end of the `ModuleList`.
137
+ ConstIterator end() const {
138
+ return modules_.end();
139
+ }
140
+
141
+ /// Attempts to return the module at the given index as the requested type.
142
+ /// Throws an exception if the index is out of bounds or the types do not
143
+ /// match.
144
+ template <typename T>
145
+ T& at(size_t index) {
146
+ static_assert(
147
+ torch::detail::is_module<T>::value,
148
+ "Can only call ModuleList::at with an nn::Module type");
149
+ TORCH_CHECK(index < size(), "Index out of range");
150
+ auto module = modules_[index]->as<T>();
151
+ TORCH_CHECK(
152
+ module,
153
+ "Unable to cast module[",
154
+ index,
155
+ "] to ",
156
+ c10::demangle(typeid(T).name()));
157
+ return *module;
158
+ }
159
+
160
+ /// Attempts to return the module at the given index as the requested type.
161
+ /// Throws an exception if the index is out of bounds or the types do not
162
+ /// match.
163
+ template <typename T>
164
+ const T& at(size_t index) const {
165
+ static_assert(
166
+ torch::detail::is_module<T>::value,
167
+ "Can only call ModuleList::at with an nn::Module type");
168
+ TORCH_CHECK(index < size(), "Index out of range");
169
+ const auto module = modules_[index]->as<T>();
170
+ TORCH_CHECK(
171
+ module,
172
+ "Unable to cast module[",
173
+ index,
174
+ "] to ",
175
+ c10::demangle(typeid(T).name()));
176
+ return *module;
177
+ }
178
+
179
+ /// Attempts to return a `std::shared_ptr` whose dynamic type is that of the
180
+ /// underlying module at the given index. Throws an exception if the index is
181
+ /// out of bounds.
182
+ std::shared_ptr<Module> ptr(size_t index) const {
183
+ TORCH_CHECK(index < size(), "Index out of range");
184
+ return modules_[index];
185
+ }
186
+
187
+ /// Attempts to return a `std::shared_ptr` whose type is the one provided.
188
+ /// Throws an exception if the index is out of bounds or the types do not
189
+ /// match.
190
+ template <typename T>
191
+ std::shared_ptr<T> ptr(size_t index) const {
192
+ static_assert(
193
+ torch::detail::is_module<T>::value,
194
+ "Can only call ModuleList::ptr with an nn::Module type");
195
+ TORCH_CHECK(index < size(), "Index out of range");
196
+ return std::dynamic_pointer_cast<T>(modules_[index]);
197
+ }
198
+
199
+ /// Like `ptr(index)`.
200
+ std::shared_ptr<Module> operator[](size_t index) const {
201
+ // This is the only method we can call without a type.
202
+ return ptr(index);
203
+ }
204
+
205
+ /// The current size of the `ModuleList` container.
206
+ size_t size() const noexcept {
207
+ return modules_.size();
208
+ }
209
+
210
+ /// True if there are no modules in the `ModuleList`.
211
+ bool is_empty() const noexcept {
212
+ return size() == 0;
213
+ }
214
+
215
+ void insert(size_t index, std::shared_ptr<Module> module) {
216
+ TORCH_CHECK(index <= size(), "Index out of range");
217
+
218
+ if (index == size())
219
+ push_back(std::move(module));
220
+ else {
221
+ modules_.insert(
222
+ modules_.begin() + Iterator::difference_type(index),
223
+ std::move(module));
224
+
225
+ for (const auto i : c10::irange(index, size() - 1)) {
226
+ (void)i; // Suppress unused variable warning
227
+ replace_module(c10::to_string(index), modules_[index]);
228
+ }
229
+ register_module(c10::to_string(size() - 1), modules_.back());
230
+ }
231
+ }
232
+
233
+ /// Unwraps the contained module of a `ModuleHolder` and inserts it in the
234
+ /// `ModuleList`.
235
+ template <typename M>
236
+ void insert(size_t index, const ModuleHolder<M>& module_holder) {
237
+ insert(index, module_holder.ptr());
238
+ }
239
+
240
+ /// Inserts a new `Module` into the `ModuleList` container, moving or copying
241
+ /// it into a `shared_ptr` internally. This method allows passing value types,
242
+ /// and letting the container deal with the boxing.
243
+ template <typename M, typename = torch::detail::enable_if_module_t<M>>
244
+ void insert(size_t index, M&& module) {
245
+ using Type = typename std::remove_reference<M>::type;
246
+ insert(index, std::make_shared<Type>(std::forward<M>(module)));
247
+ }
248
+
249
+ private:
250
+ template <typename Head, typename... Tail>
251
+ void push_back_var(Head&& head, Tail&&... tail) {
252
+ push_back(std::forward<Head>(head));
253
+ // Recursively calls this method, until the parameter pack only has this
254
+ // entry left. Then calls `push_back()` a final time (above).
255
+ push_back_var(std::forward<Tail>(tail)...);
256
+ }
257
+
258
+ /// The base case, when the list of modules is empty.
259
+ void push_back_var() {}
260
+
261
+ // Box the AnyModules to give ModuleList reference semantics, like the rest of
262
+ // the API. Note that this is not required otherwise, this could just be a
263
+ // `vector<AnyModule>`.
264
+ std::vector<std::shared_ptr<Module>> modules_;
265
+ };
266
+
267
+ /// A `ModuleHolder` subclass for `ModuleListImpl`.
268
+ /// See the documentation for `ModuleListImpl` class to learn what methods it
269
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
270
+ /// module storage semantics.
271
+ TORCH_MODULE(ModuleList);
272
+
273
+ } // namespace nn
274
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/named_any.h ADDED
@@ -0,0 +1,94 @@
1
+ #pragma once
2
+
3
+ #include <torch/detail/static.h>
4
+ #include <torch/nn/module.h>
5
+ #include <torch/nn/modules/container/any.h>
6
+ #include <torch/nn/pimpl.h>
7
+ #include <torch/types.h>
8
+
9
+ #include <torch/csrc/autograd/variable.h>
10
+ #include <torch/csrc/utils/variadic.h>
11
+
12
+ #include <ATen/Device.h>
13
+
14
+ #include <initializer_list>
15
+ #include <memory>
16
+ #include <type_traits>
17
+ #include <typeinfo>
18
+ #include <utility>
19
+ #include <vector>
20
+
21
+ namespace torch {
22
+ namespace nn {
23
+
24
+ /// Stores a type erased `Module` with name.
25
+ ///
26
+ /// The `NamedAnyModule` class enables the following API for constructing
27
+ /// `nn::Sequential` with named submodules:
28
+ /// \rst
29
+ /// .. code-block:: cpp
30
+ ///
31
+ /// struct M : torch::nn::Module {
32
+ /// explicit M(int value_) : value(value_) {}
33
+ /// int value;
34
+ /// int forward() {
35
+ /// return value;
36
+ /// }
37
+ /// };
38
+ ///
39
+ /// Sequential sequential({
40
+ /// {"m1", std::make_shared<M>(1)}, // shared pointer to `Module` is
41
+ /// supported {std::string("m2"), M(2)}, // `Module` is supported
42
+ /// {"linear1", Linear(10, 3)} // `ModuleHolder` is supported
43
+ /// });
44
+ /// \endrst
45
+ class NamedAnyModule {
46
+ public:
47
+ /// Creates a `NamedAnyModule` from a (boxed) `Module`.
48
+ template <typename ModuleType>
49
+ NamedAnyModule(std::string name, std::shared_ptr<ModuleType> module_ptr)
50
+ : NamedAnyModule(std::move(name), AnyModule(std::move(module_ptr))) {}
51
+
52
+ /// Creates a `NamedAnyModule` from a `Module`, moving or copying it
53
+ /// into a `shared_ptr` internally.
54
+ // NOTE: We need to use `std::remove_reference<M>::type` to get rid of
55
+ // any reference components for make_unique.
56
+ template <typename M, typename = torch::detail::enable_if_module_t<M>>
57
+ NamedAnyModule(std::string name, M&& module)
58
+ : NamedAnyModule(
59
+ std::move(name),
60
+ std::make_shared<typename std::remove_reference<M>::type>(
61
+ std::forward<M>(module))) {}
62
+
63
+ /// Creates a `NamedAnyModule` from a `Module` that is unwrapped from
64
+ /// a `ModuleHolder`.
65
+ template <typename M>
66
+ NamedAnyModule(std::string name, const ModuleHolder<M>& module_holder)
67
+ : NamedAnyModule(std::move(name), module_holder.ptr()) {}
68
+
69
+ /// Creates a `NamedAnyModule` from a type-erased `AnyModule`.
70
+ NamedAnyModule(std::string name, AnyModule any_module)
71
+ : name_(std::move(name)), module_(std::move(any_module)) {}
72
+
73
+ /// Returns a reference to the name.
74
+ const std::string& name() const noexcept {
75
+ return name_;
76
+ }
77
+
78
+ /// Returns a reference to the module.
79
+ AnyModule& module() noexcept {
80
+ return module_;
81
+ }
82
+
83
+ /// Returns a const reference to the module.
84
+ const AnyModule& module() const noexcept {
85
+ return module_;
86
+ }
87
+
88
+ private:
89
+ std::string name_;
90
+ AnyModule module_;
91
+ };
92
+
93
+ } // namespace nn
94
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h ADDED
@@ -0,0 +1,148 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/pimpl.h>
5
+ #include <torch/ordered_dict.h>
6
+ #include <utility>
7
+ #include <vector>
8
+
9
+ namespace torch {
10
+ namespace nn {
11
+
12
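+ /// For illustration, a minimal `ParameterDict` usage sketch (the names and
+ /// shapes below are assumptions for the example, not part of this header):
+ ///
+ /// \rst
+ /// .. code-block:: cpp
+ ///
+ /// torch::nn::ParameterDict dict;
+ /// dict->insert("weight", torch::randn({4, 3}));
+ /// torch::Tensor weight = dict->get("weight");
+ /// bool has_bias = dict->contains("bias"); // false
+ /// std::vector<std::string> keys = dict->keys(); // {"weight"}
+ /// \endrst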
+ class ParameterDictImpl : public Cloneable<ParameterDictImpl> {
13
+ public:
14
+ using Iterator = OrderedDict<std::string, Tensor>::Iterator;
15
+ using ConstIterator = OrderedDict<std::string, Tensor>::ConstIterator;
16
+
17
+ ParameterDictImpl() = default;
18
+
19
+ explicit ParameterDictImpl(
20
+ const torch::OrderedDict<std::string, torch::Tensor>& params) {
21
+ parameters_ = params;
22
+ }
23
+
24
+ /// `reset()` is empty for `ParameterDict`, since it does not have
25
+ /// parameters of its own.
26
+ void reset() override {}
27
+
28
+ /// Pretty prints the `ParameterDict` module into the given `stream`.
29
+ void pretty_print(std::ostream& stream) const override {
30
+ stream << "torch::nn::ParameterDict(" << std::endl;
31
+ for (const auto& pair : parameters_) {
32
+ stream << "(" << pair.key() << ")"
33
+ << ": Parameter containing: [" << pair.value().scalar_type()
34
+ << " of size " << pair.value().sizes() << "]";
35
+ ;
36
+ stream << std::endl;
37
+ }
38
+ stream << ")";
39
+ }
40
+
41
+ /// Inserts the parameter along with the key into the ParameterDict.
42
+ /// The parameter keeps its `requires_grad` setting when registered.
43
+ Tensor& insert(std::string key, Tensor param) {
44
+ bool requires_grad = param.requires_grad();
45
+ return register_parameter(std::move(key), std::move(param), requires_grad);
46
+ }
47
+
48
+ /// Removes the key from the ParameterDict and returns its value. Throws an
49
+ /// exception if the key is not contained; check contains(key) first for a
50
+ /// non-throwing access.
51
+ Tensor pop(const std::string& key) {
52
+ torch::Tensor v = parameters_[key];
53
+ parameters_.erase(key);
54
+ return v;
55
+ }
56
+
57
+ /// Return the keys in the dict
58
+ ::std::vector<std::string> keys() const {
59
+ return parameters_.keys();
60
+ }
61
+
62
+ /// Return the Values in the dict
63
+ ::std::vector<torch::Tensor> values() const {
64
+ return parameters_.values();
65
+ }
66
+
67
+ /// Return an iterator to the start of ParameterDict
68
+ Iterator begin() {
69
+ return parameters_.begin();
70
+ }
71
+
72
+ /// Return a const iterator to the start of ParameterDict
73
+ ConstIterator begin() const {
74
+ return parameters_.begin();
75
+ }
76
+
77
+ /// Return an iterator to the end of ParameterDict
78
+ Iterator end() {
79
+ return parameters_.end();
80
+ }
81
+
82
+ /// Return a const iterator to the end of ParameterDict
83
+ ConstIterator end() const {
84
+ return parameters_.end();
85
+ }
86
+
87
+ /// Return the number of items currently stored in the ParameterDict
88
+ size_t size() const noexcept {
89
+ return parameters_.size();
90
+ }
91
+
92
+ /// Return true if the ParameterDict is empty, otherwise return false
93
+ bool empty() const noexcept {
94
+ return parameters_.is_empty();
95
+ }
96
+
97
+ /// Update the ParameterDict with the key-value pairs from
98
+ /// another ParameterDict, overwriting existing key
99
+ template <typename Container>
100
+ void update(const Container& container) {
101
+ for (auto& item : container) {
102
+ parameters_[item.key()] = item.value();
103
+ }
104
+ }
105
+
106
+ /// Remove all parameters in the ParameterDict
107
+ void clear() {
108
+ parameters_.clear();
109
+ }
110
+
111
+ /// Checks whether a parameter with the given key exists in the ParameterDict
112
+ bool contains(const std::string& key) const noexcept {
113
+ return parameters_.contains(key);
114
+ }
115
+
116
+ /// Returns the value associated with the given `key`. Throws an exception if
117
+ /// no such key is stored in the `ParameterDict`. Check contains(key) before
118
+ /// for a non-throwing way of access
119
+ const Tensor& get(const std::string& key) const {
120
+ return parameters_[key];
121
+ }
122
+
123
+ /// Returns the value associated with the given `key`. Throws an exception if
124
+ /// no such key is stored in the `ParameterDict`. Check contains(key) before
125
+ /// for a non-throwing way of access
126
+ Tensor& get(const std::string& key) {
127
+ return parameters_[key];
128
+ }
129
+
130
+ /// Returns the value associated with the given `key`. Throws an exception if
131
+ /// no such key is stored in the `ParameterDict`. Check contains(key) before
132
+ /// for a non-throwing way of access
133
+ Tensor& operator[](const std::string& key) {
134
+ return parameters_[key];
135
+ }
136
+
137
+ /// Returns the value associated with the given `key`. Throws an exception if
138
+ /// no such key is stored in the `ParameterDict`. Check contains(key) before
139
+ /// for a non-throwing way of access
140
+ const Tensor& operator[](const std::string& key) const {
141
+ return parameters_[key];
142
+ }
143
+ };
144
+
145
+ TORCH_MODULE(ParameterDict);
146
+
147
+ } // namespace nn
148
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h ADDED
@@ -0,0 +1,169 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/module.h>
5
+
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
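+ /// For illustration, a minimal `ParameterList` usage sketch (the tensor shapes
+ /// below are assumptions for the example, not part of this header):
+ ///
+ /// \rst
+ /// .. code-block:: cpp
+ ///
+ /// torch::nn::ParameterList list(torch::randn({3}), torch::randn({4, 4}));
+ /// list->append(torch::zeros({2}));
+ /// torch::Tensor first = list->at(0);
+ /// size_t n = list->size(); // 3
+ /// \endrst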
+ class ParameterListImpl : public Cloneable<ParameterListImpl> {
11
+ public:
12
+ using Iterator = typename std::vector<
13
+ OrderedDict<std::string, torch::Tensor>::Item>::iterator;
14
+ using ConstIterator = typename std::vector<
15
+ OrderedDict<std::string, torch::Tensor>::Item>::const_iterator;
16
+
17
+ ParameterListImpl() = default;
18
+
19
+ /// Constructs the `ParameterList` from a variadic list of tensors.
20
+ template <typename... Tensors>
21
+ explicit ParameterListImpl(Tensors&&... params) {
22
+ parameters_.reserve(sizeof...(Tensors));
23
+ push_back_var(std::forward<Tensors>(params)...);
24
+ }
25
+
26
+ template <typename... Tensors>
27
+ explicit ParameterListImpl(const Tensors&... params) {
28
+ parameters_.reserve(sizeof...(Tensors));
29
+ push_back_var(std::forward<Tensors>(params)...);
30
+ }
31
+
32
+ /// `reset()` is empty for `ParameterList`, since it does not have parameters
33
+ /// of its own.
34
+ void reset() override {}
35
+
36
+ /// Pretty prints the `ParameterList` module into the given `stream`.
37
+ void pretty_print(std::ostream& stream) const override {
38
+ stream << "torch::nn::ParameterList(" << std::endl;
39
+ for (const auto& pair : parameters_) {
40
+ stream << "(" << pair.key() << ")"
41
+ << ": Parameter containing: [" << pair.value().scalar_type()
42
+ << " of size " << pair.value().sizes() << "]";
43
+ ;
44
+ stream << std::endl;
45
+ }
46
+ stream << ")";
47
+ }
48
+
49
+ /// Pushes a given parameter to the end of the list.
50
+ void append(torch::Tensor&& param) {
51
+ bool requires_grad = param.requires_grad();
52
+ register_parameter(
53
+ c10::to_string(parameters_.size()), std::move(param), requires_grad);
54
+ }
55
+
56
+ /// Pushes a given parameter to the end of the list.
57
+ void append(const torch::Tensor& param) {
58
+ bool requires_grad = param.requires_grad();
59
+ register_parameter(
60
+ c10::to_string(parameters_.size()), param, requires_grad);
61
+ }
62
+
63
+ /// Pushes a given key-value pair's parameter to the end of the list.
64
+ /// The key of the pair is discarded; only the value
65
+ /// is added into the `ParameterList`.
66
+ void append(const OrderedDict<std::string, torch::Tensor>::Item& pair) {
67
+ register_parameter(
68
+ c10::to_string(parameters_.size()),
69
+ pair.value(),
70
+ pair.value().requires_grad());
71
+ }
72
+
73
+ /// extend parameters from a container to the end of the list
74
+ template <typename Container>
75
+ void extend(const Container& container) {
76
+ for (const auto& param : container) {
77
+ append(param);
78
+ }
79
+ }
80
+
81
+ /// Returns an iterator to the start of the ParameterList
82
+ /// the iterator returned will be type of `OrderedDict<std::string,
83
+ /// torch::Tensor>::Item`
84
+ Iterator begin() {
85
+ return parameters_.begin();
86
+ }
87
+
88
+ /// Returns a const iterator to the start of the ParameterList
89
+ /// the iterator returned will be type of `OrderedDict<std::string,
90
+ /// torch::Tensor>::Item`
91
+ ConstIterator begin() const {
92
+ return parameters_.begin();
93
+ }
94
+
95
+ /// Returns an iterator to the end of the ParameterList
96
+ /// the iterator returned will be type of `OrderedDict<std::string,
97
+ /// torch::Tensor>::Item`
98
+ Iterator end() {
99
+ return parameters_.end();
100
+ }
101
+
102
+ /// Returns a const iterator to the end of the ParameterList
103
+ /// the iterator returned will be type of `OrderedDict<std::string,
104
+ /// torch::Tensor>::Item`
105
+ ConstIterator end() const {
106
+ return parameters_.end();
107
+ }
108
+
109
+ /// Returns the parameter at the given index. Throws an exception if the
110
+ /// index is out of range.
112
+ at::Tensor& at(size_t idx) {
113
+ TORCH_CHECK(idx < size(), "Index out of range");
114
+ return parameters_[c10::to_string(idx)];
115
+ }
116
+
117
+ /// Returns the parameter at the given index. Throws an exception if the
118
+ /// index is out of range.
120
+ const at::Tensor& at(size_t idx) const {
121
+ TORCH_CHECK(idx < size(), "Index out of range");
122
+ return parameters_[c10::to_string(idx)];
123
+ }
124
+
125
+ /// Returns the parameter at the given index. Throws an exception if the
126
+ /// index is out of range.
128
+ at::Tensor& operator[](size_t idx) {
129
+ return at(idx);
130
+ }
131
+
132
+ /// Returns the parameter at the given index `idx`, equivalent to `at(idx)`.
133
+ /// Throws an exception if the index is out of range; check `idx < size()`
134
+ /// beforehand for a non-throwing way of access.
135
+ const at::Tensor& operator[](size_t idx) const {
136
+ return at(idx);
137
+ }
138
+
139
+ /// Return the size of the ParameterList
140
+ size_t size() const noexcept {
141
+ return parameters_.size();
142
+ }
143
+ /// True if the ParameterList is empty
144
+ bool is_empty() const noexcept {
145
+ return parameters_.is_empty();
146
+ }
147
+
148
+ /// Overloads `+=`, so that two `ParameterList`s can be incrementally concatenated.
149
+ template <typename Container>
150
+ Container& operator+=(const Container& other) {
151
+ extend(other);
152
+ return *this;
153
+ }
154
+
155
+ private:
156
+ template <typename Head, typename... Tail>
157
+ void push_back_var(Head&& head, Tail&&... tail) {
158
+ append(std::forward<Head>(head));
159
+ // Recursively calls this method until the parameter pack is empty, at which
160
+ // point the empty `push_back_var()` base case (below) is called.
161
+ push_back_var(std::forward<Tail>(tail)...);
162
+ }
163
+
164
+ /// The base case, when the list of modules is empty.
165
+ void push_back_var() {}
166
+ };
167
+ TORCH_MODULE(ParameterList);
168
+ } // namespace nn
169
+ } // namespace torch
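(Illustrative sketch, not part of the uploaded header: a minimal program exercising the `ParameterList` API declared above, assuming a standard LibTorch installation.)

#include <torch/torch.h>
#include <iostream>

int main() {
  // Parameters passed to the constructor are registered under keys "0", "1", ...
  torch::nn::ParameterList params(
      torch::randn({2, 3}, torch::requires_grad()),
      torch::randn({3}, torch::requires_grad()));

  // Append another parameter after construction; it is registered under key "2".
  params->append(torch::zeros({4}));

  // Positional access and iteration over OrderedDict items.
  std::cout << params->at(0).sizes() << "\n";
  for (const auto& item : *params) {
    std::cout << item.key() << ": " << item.value().sizes() << "\n";
  }
}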
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/sequential.h ADDED
@@ -0,0 +1,390 @@
1
+ #pragma once
2
+
3
+ #include <torch/detail/static.h>
4
+ #include <torch/nn/cloneable.h>
5
+ #include <torch/nn/module.h>
6
+ #include <torch/nn/modules/container/any.h>
7
+ #include <torch/nn/modules/container/named_any.h>
8
+ #include <torch/nn/pimpl.h>
9
+ #include <torch/types.h>
10
+
11
+ #include <c10/util/Exception.h>
12
+
13
+ #include <cstdint>
14
+ #include <memory>
15
+ #include <ostream>
16
+ #include <string>
17
+ #include <type_traits>
18
+ #include <utility>
19
+ #include <vector>
20
+
21
+ namespace torch {
22
+ namespace nn {
23
+
24
+ /// A list of `Module`s that acts as a `Module` itself.
25
+ ///
26
+ /// A `Sequential` is fundamentally a list of `Module`s, each with a `forward()`
27
+ /// method. `Sequential` provides a `forward()` method of its own, which accepts
28
+ /// any input and forwards it to the first module it stores. It then "chains"
29
+ /// outputs to inputs sequentially for each subsequent module, finally returning
30
+ /// the output of the last module. For example:
31
+ ///
32
+ /// \rst
33
+ /// .. code-block:: cpp
34
+ ///
35
+ /// torch::nn::Sequential seq(
36
+ /// torch::nn::Linear(3, 4),
37
+ /// torch::nn::BatchNorm1d(4),
38
+ /// torch::nn::Dropout(0.5)
39
+ /// );
40
+ ///
41
+ /// auto output = seq->forward(torch::ones(3));
42
+ ///
43
+ /// \endrst
44
+ ///
45
+ /// This can conceptually be thought of as the following loop (using Python as
46
+ /// pseudocode):
47
+ ///
48
+ /// \rst
49
+ /// .. code-block:: python
50
+ ///
51
+ /// def forward(sequential, input):
52
+ /// for module in sequential:
53
+ /// input = module(input)
54
+ /// return input
55
+ ///
56
+ /// \endrst
57
+ ///
58
+ /// Why should you use `Sequential` instead of a simple `std::vector`? The value
59
+ /// a `Sequential` provides over manually calling a sequence of modules is that
60
+ /// it allows treating the whole container *as a single module*, such that
61
+ /// performing a transformation on the `Sequential` applies to each of the
62
+ /// modules it stores (which are each a registered submodule of the
63
+ /// `Sequential`). For example, calling
64
+ /// `.to(torch::kCUDA)` on a `Sequential` will move each module in the list to
65
+ /// CUDA memory. For example:
66
+ ///
67
+ /// \rst
68
+ /// .. code-block:: cpp
69
+ ///
70
+ /// torch::nn::Sequential seq(
71
+ /// torch::nn::Linear(3, 4),
72
+ /// torch::nn::BatchNorm1d(4),
73
+ /// torch::nn::Dropout(0.5)
74
+ /// );
75
+ ///
76
+ /// // Convert all modules to CUDA.
77
+ /// seq->to(torch::kCUDA);
78
+ ///
79
+ /// \endrst
80
+ ///
81
+ /// Finally, `Sequential` provides a lightweight container API, such as allowing
82
+ /// iteration over submodules, positional access, adding a new module after
83
+ /// construction via `push_back`, as well as joining two `Sequential`s via
84
+ /// `extend`.
85
+ ///
86
+ /// \rst
87
+ /// .. attention::
88
+ /// One current limitation of `Sequential` is that all except the first module
89
+ /// must accept a single argument. If your modules need to take multiple
90
+ /// arguments, you should define them to take and return tuples.
91
+ /// \endrst
92
+ class SequentialImpl : public Cloneable<SequentialImpl> {
93
+ public:
94
+ using Iterator = std::vector<AnyModule>::iterator;
95
+ using ConstIterator = std::vector<AnyModule>::const_iterator;
96
+
97
+ SequentialImpl() = default;
98
+
99
+ /// Constructs the `Sequential` from a variadic list of modules.
100
+ template <typename... Modules>
101
+ explicit SequentialImpl(Modules&&... modules) {
102
+ modules_.reserve(sizeof...(Modules));
103
+ push_back(std::forward<Modules>(modules)...);
104
+ }
105
+
106
+ /// Constructs the `Sequential` from an `OrderedDict` of named `AnyModule`s.
107
+ explicit SequentialImpl(
108
+ torch::OrderedDict<std::string, AnyModule>&& ordered_dict) {
109
+ modules_.reserve(ordered_dict.size());
110
+ for (auto& item : ordered_dict) {
111
+ push_back(item.key(), std::move(item.value()));
112
+ }
113
+ }
114
+
115
+ /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s.
116
+ /// It enables the following use case:
117
+ /// `Sequential sequential({{"m1", M(1)}, {"m2", M(2)}})`
118
+ explicit SequentialImpl(std::initializer_list<NamedAnyModule> named_modules) {
119
+ modules_.reserve(named_modules.size());
120
+ for (const auto& named_module : named_modules) {
121
+ push_back(named_module.name(), named_module.module());
122
+ }
123
+ }
124
+
125
+ /// Special cloning function for `Sequential` because it does not use
126
+ /// `reset()`.
127
+ std::shared_ptr<Module> clone(
128
+ const optional<Device>& device = nullopt) const override {
129
+ auto clone = std::make_shared<SequentialImpl>();
130
+ for (const auto& module : modules_) {
131
+ clone->push_back(module.clone(device));
132
+ }
133
+ return clone;
134
+ }
135
+
136
+ /// `reset()` is empty for `Sequential`, since it does not have parameters of
137
+ /// its own.
138
+ void reset() override {}
139
+
140
+ /// Pretty prints the `Sequential` module into the given `stream`.
141
+ void pretty_print(std::ostream& stream) const override {
142
+ stream << "torch::nn::Sequential";
143
+ }
144
+
145
+ /// Feeds `inputs` to the first module and then chains outputs to inputs,
146
+ /// returning the last output.
147
+ ///
148
+ /// Conceptually the following loop in Python:
149
+ ///
150
+ /// \rst
151
+ /// .. code-block:: python
152
+ ///
153
+ /// def forward(sequential, input):
154
+ /// for module in sequential:
155
+ /// input = module(input)
156
+ /// return input
157
+ ///
158
+ /// \endrst
159
+ ///
160
+ /// The return type is taken as the first template parameter. It defaults to
161
+ /// `Tensor`. If the last module in the `Sequential` returns another type `T`,
162
+ /// you should call `forward<T>(inputs)` instead of just `forward(inputs)`:
163
+ ///
164
+ /// \rst
165
+ /// .. code-block:: cpp
166
+ ///
167
+ /// torch::Tensor tensor = sequential1->forward(inputs);
168
+ /// int integer = sequential2->forward<int>(inputs);
169
+ /// float value = sequential3->forward<float>(inputs);
170
+ ///
171
+ /// \endrst
172
+ template <typename ReturnType = Tensor, typename... InputTypes>
173
+ ReturnType forward(InputTypes&&... inputs) {
174
+ TORCH_CHECK(!is_empty(), "Cannot call forward() on an empty Sequential");
175
+
176
+ auto iterator = modules_.begin();
177
+ auto input = iterator->any_forward(std::forward<InputTypes>(inputs)...);
178
+
179
+ for (++iterator; iterator != modules_.end(); ++iterator) {
180
+ input = iterator->any_forward(std::move(input));
181
+ }
182
+
183
+ // Check the return value and give a nice error message if the requested
184
+ // return type was incorrect.
185
+ if (auto* return_value = input.template try_get<ReturnType>()) {
186
+ return std::move(*return_value);
187
+ }
188
+ AT_ERROR(
189
+ "The type of the return value is ",
190
+ c10::demangle(input.type_info().name()),
191
+ ", but you asked for type ",
192
+ c10::demangle(typeid(ReturnType).name()));
193
+ }
194
+
195
+ /// Adds a new (boxed) `Module` to the `Sequential` container.
196
+ template <typename ModuleType>
197
+ void push_back(std::shared_ptr<ModuleType> module_ptr) {
198
+ push_back(c10::to_string(modules_.size()), std::move(module_ptr));
199
+ }
200
+
201
+ /// Adds a new named (boxed) `Module` to the `Sequential` container.
202
+ template <typename ModuleType>
203
+ void push_back(std::string name, std::shared_ptr<ModuleType> module_ptr) {
204
+ push_back(std::move(name), AnyModule(std::move(module_ptr)));
205
+ }
206
+
207
+ /// Adds a new `Module` to the `Sequential` container, moving or copying it
208
+ /// into a `shared_ptr` internally. This method allows passing value types,
209
+ /// and letting the container deal with the boxing. This means you can write
210
+ /// `Sequential(Module(3, 4))` instead of
211
+ /// `Sequential(std::make_shared<Module>(3, 4))`.
212
+ template <typename M, typename = torch::detail::enable_if_module_t<M>>
213
+ void push_back(M&& module) {
214
+ push_back(c10::to_string(modules_.size()), std::forward<M>(module));
215
+ }
216
+
217
+ /// Adds a new named `Module` to the `Sequential` container, moving or copying
218
+ /// it into a `shared_ptr` internally. This method allows passing value types,
219
+ /// and letting the container deal with the boxing.
220
+ template <typename M, typename = torch::detail::enable_if_module_t<M>>
221
+ void push_back(std::string name, M&& module) {
222
+ using Type = typename std::remove_reference<M>::type;
223
+ push_back(std::move(name), std::make_shared<Type>(std::forward<M>(module)));
224
+ }
225
+
226
+ /// Unwraps the contained module of a `ModuleHolder` and adds it to the
227
+ /// `Sequential`.
228
+ template <typename M>
229
+ void push_back(const ModuleHolder<M>& module_holder) {
230
+ push_back(c10::to_string(modules_.size()), module_holder);
231
+ }
232
+
233
+ /// Unwraps the contained named module of a `ModuleHolder` and adds it to the
234
+ /// `Sequential`.
235
+ template <typename M>
236
+ void push_back(std::string name, const ModuleHolder<M>& module_holder) {
237
+ push_back(std::move(name), module_holder.ptr());
238
+ }
239
+
240
+ /// Iterates over the container and calls `push_back()` on each value.
241
+ template <typename Container>
242
+ void extend(const Container& container) {
243
+ for (const auto& module : container) {
244
+ push_back(module);
245
+ }
246
+ }
247
+
248
+ /// Adds a type-erased `AnyModule` to the `Sequential`.
249
+ void push_back(AnyModule any_module) {
250
+ push_back(c10::to_string(modules_.size()), std::move(any_module));
251
+ }
252
+
253
+ void push_back(std::string name, AnyModule any_module) {
254
+ modules_.push_back(std::move(any_module));
255
+ const auto index = modules_.size() - 1;
256
+ register_module(std::move(name), modules_[index].ptr());
257
+ }
258
+
259
+ /// Returns an iterator to the start of the `Sequential`.
260
+ Iterator begin() {
261
+ return modules_.begin();
262
+ }
263
+
264
+ /// Returns a const iterator to the start of the `Sequential`.
265
+ ConstIterator begin() const {
266
+ return modules_.begin();
267
+ }
268
+
269
+ /// Returns an iterator to the end of the `Sequential`.
270
+ Iterator end() {
271
+ return modules_.end();
272
+ }
273
+
274
+ /// Returns a const iterator to the end of the `Sequential`.
275
+ ConstIterator end() const {
276
+ return modules_.end();
277
+ }
278
+
279
+ /// Attempts to return the module at the given index as the requested type.
280
+ /// Throws an exception if the index is out of bounds or the types do not
281
+ /// match.
282
+ template <typename T>
283
+ T& at(size_t index) {
284
+ static_assert(
285
+ torch::detail::is_module<T>::value,
286
+ "Can only call Sequential::at with an nn::Module type");
287
+ TORCH_CHECK(index < size(), "Index out of range");
288
+ return modules_[index].get<T>();
289
+ }
290
+
291
+ /// Attempts to return the module at the given index as the requested type.
292
+ /// Throws an exception if the index is out of bounds or the types do not
293
+ /// match.
294
+ template <typename T>
295
+ const T& at(size_t index) const {
296
+ static_assert(
297
+ torch::detail::is_module<T>::value,
298
+ "Can only call Sequential::at with an nn::Module type");
299
+ TORCH_CHECK(index < size(), "Index out of range");
300
+ return modules_[index].get<T>();
301
+ }
302
+
303
+ /// Attempts to return a `std::shared_ptr` whose dynamic type is that of the
304
+ /// underlying module at the given index. Throws an exception if the index is
305
+ /// out of bounds.
306
+ std::shared_ptr<Module> ptr(size_t index) const {
307
+ TORCH_CHECK(index < size(), "Index out of range");
308
+ return modules_[index].ptr();
309
+ }
310
+
311
+ /// Attempts to return a `std::shared_ptr` whose type is the one provided.
312
+ /// Throws an exception if the index is out of bounds or the types do not
313
+ /// match.
314
+ template <typename T>
315
+ std::shared_ptr<T> ptr(size_t index) const {
316
+ static_assert(
317
+ torch::detail::is_module<T>::value,
318
+ "Can only call Sequential::ptr with an nn::Module type");
319
+ TORCH_CHECK(index < size(), "Index out of range");
320
+ return modules_[index].ptr<T>();
321
+ }
322
+
323
+ /// Like `ptr(index)`.
324
+ std::shared_ptr<Module> operator[](size_t index) const {
325
+ // This is the only method we can call without a type.
326
+ return ptr(index);
327
+ }
328
+
329
+ /// The current size of the `Sequential` container.
330
+ size_t size() const noexcept {
331
+ return modules_.size();
332
+ }
333
+
334
+ /// True if there are no modules in the `Sequential`.
335
+ bool is_empty() const noexcept {
336
+ return size() == 0;
337
+ }
338
+
339
+ private:
340
+ /// Takes a First *and* Second parameter, to avoid ambiguity when a parameter
341
+ /// pack has only one type, in which case the template would be preferred,
342
+ /// even if the other `push_back` functions are better fits (e.g. `unique_ptr`
343
+ /// -> `shared_ptr` overload).
344
+ /// NOTE: We explicitly avoid matching this template with
345
+ /// `push_back(std::string("name"), module)` or `push_back("name", module)`,
346
+ /// since they should be handled by their respective `push_back` functions.
347
+ template <
348
+ typename First,
349
+ typename Second,
350
+ typename... Rest,
351
+ typename = torch::disable_if_t<
352
+ std::is_same<First, std::string>::value ||
353
+ // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
354
+ std::is_same<
355
+ typename std::decay<First>::type,
356
+ std::decay<const char (&)[]>::type>::value>>
357
+ void push_back(First&& first, Second&& second, Rest&&... rest) {
358
+ push_back(std::forward<First>(first));
359
+ // Recursively calls this method, until the parameter pack only thas this
360
+ // entry left. Then calls `push_back()` a final time (above).
361
+ push_back(std::forward<Second>(second), std::forward<Rest>(rest)...);
362
+ }
363
+
364
+ /// The base case, when the list of modules is empty.
365
+ void push_back() {}
366
+
367
+ // Box the AnyModules to give Sequential reference semantics, like the rest of
368
+ // the API. Note that this is not required otherwise, this could just be a
369
+ // `vector<AnyModule>`.
370
+ std::vector<AnyModule> modules_;
371
+ };
372
+
373
+ /// A `ModuleHolder` subclass for `SequentialImpl`.
374
+ /// See the documentation for `SequentialImpl` class to learn what methods it
375
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
376
+ /// module storage semantics.
377
+ class Sequential : public torch::nn::ModuleHolder<SequentialImpl> {
378
+ public:
379
+ using torch::nn::ModuleHolder<SequentialImpl>::ModuleHolder;
380
+
381
+ Sequential() : ModuleHolder() {}
382
+
383
+ /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s.
384
+ /// It enables the following use case:
385
+ /// `Sequential sequential({{"m1", M(1)}, {"m2", M(2)}})`
386
+ Sequential(std::initializer_list<NamedAnyModule> named_modules)
387
+ : ModuleHolder(std::make_shared<SequentialImpl>(named_modules)) {}
388
+ };
389
+ } // namespace nn
390
+ } // namespace torch
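(Illustrative sketch, not part of the uploaded header: building a `Sequential`, appending a module after construction, and calling `forward`, assuming a standard LibTorch installation.)

#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::Sequential seq(
      torch::nn::Linear(3, 4),
      torch::nn::ReLU(),
      torch::nn::Linear(4, 2));

  // Modules can also be appended after construction; this one is registered as "3".
  seq->push_back(torch::nn::Dropout(0.3));

  auto x = torch::randn({8, 3});
  auto y = seq->forward(x);        // chains Linear -> ReLU -> Linear -> Dropout
  std::cout << y.sizes() << "\n";  // [8, 2]
}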
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/conv.h ADDED
@@ -0,0 +1,453 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <c10/util/overloaded.h>
5
+
6
+ #include <torch/expanding_array.h>
7
+ #include <torch/nn/cloneable.h>
8
+ #include <torch/nn/init.h>
9
+ #include <torch/nn/modules/common.h>
10
+ #include <torch/nn/modules/utils.h>
11
+ #include <torch/nn/options/conv.h>
12
+ #include <torch/nn/pimpl.h>
13
+ #include <torch/types.h>
14
+
15
+ #include <torch/csrc/Export.h>
16
+
17
+ #include <cstddef>
18
+ #include <vector>
19
+
20
+ namespace torch {
21
+ namespace nn {
22
+
23
+ /// Base class for all (dimension-specialized) convolution modules.
24
+ template <size_t D, typename Derived>
25
+ class ConvNdImpl : public torch::nn::Cloneable<Derived> {
26
+ public:
27
+ explicit ConvNdImpl(detail::ConvNdOptions<D> options_)
28
+ : options(std::move(options_)) {
29
+ // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
30
+ reset();
31
+ }
32
+
33
+ void reset() override {
34
+ TORCH_CHECK(
35
+ options.in_channels() > 0 && options.groups() > 0 &&
36
+ options.out_channels() > 0,
37
+ "in_channels, groups and out_channels must be a positive integer.");
38
+ TORCH_CHECK(
39
+ options.in_channels() % options.groups() == 0,
40
+ "in_channels must be divisible by groups");
41
+ TORCH_CHECK(
42
+ options.out_channels() % options.groups() == 0,
43
+ "out_channels must be divisible by groups");
44
+
45
+ std::visit(
46
+ c10::overloaded(
47
+ [&](enumtype::kValid) {
48
+ _reversed_padding_repeated_twice.resize(2 * D);
49
+ std::fill_n(_reversed_padding_repeated_twice.begin(), 2 * D, 0);
50
+ },
51
+ [&](enumtype::kSame) {
52
+ for (const auto i : c10::irange(D)) {
53
+ const auto stride = (*options.stride())[i];
54
+ TORCH_CHECK(
55
+ stride == 1,
56
+ "padding='same' is not supported for strided convolutions");
57
+ }
58
+
59
+ _reversed_padding_repeated_twice.resize(2 * D);
60
+ for (const auto i : c10::irange(D)) {
61
+ const auto dilation = (*options.dilation())[i];
62
+ const auto kernel_size = (*options.kernel_size())[i];
63
+ const auto total_padding = dilation * (kernel_size - 1);
64
+ auto left_pad = total_padding / 2;
65
+ auto right_pad = total_padding - left_pad;
66
+ _reversed_padding_repeated_twice[2 * i] = left_pad;
67
+ _reversed_padding_repeated_twice[2 * i + 1] = right_pad;
68
+ }
69
+ },
70
+ [&](const ExpandingArray<D>& pad) {
71
+ _reversed_padding_repeated_twice =
72
+ torch::nn::modules::utils::_reverse_repeat_vector(pad, 2);
73
+ }),
74
+ options.padding());
75
+
76
+ if (options.transposed()) {
77
+ std::vector<int64_t> weight_sizes = {
78
+ options.in_channels(), options.out_channels() / options.groups()};
79
+ weight_sizes.insert(
80
+ weight_sizes.end(),
81
+ (*options.kernel_size()).begin(),
82
+ (*options.kernel_size()).end());
83
+ weight = this->register_parameter("weight", torch::empty(weight_sizes));
84
+ } else {
85
+ std::vector<int64_t> weight_sizes = {
86
+ options.out_channels(), options.in_channels() / options.groups()};
87
+ weight_sizes.insert(
88
+ weight_sizes.end(),
89
+ (*options.kernel_size()).begin(),
90
+ (*options.kernel_size()).end());
91
+ weight = this->register_parameter("weight", torch::empty(weight_sizes));
92
+ }
93
+
94
+ if (options.bias()) {
95
+ bias = this->register_parameter(
96
+ "bias", torch::empty({options.out_channels()}));
97
+ } else {
98
+ this->register_parameter("bias", Tensor(), /*requires_grad=*/false);
99
+ }
100
+
101
+ reset_parameters();
102
+ }
103
+
104
+ void reset_parameters() {
105
+ init::kaiming_uniform_(
106
+ weight,
107
+ /*a=*/std::sqrt(5)); // NOLINT(cppcoreguidelines-avoid-magic-numbers)
108
+
109
+ if (bias.defined()) {
110
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
111
+ int64_t fan_in, fan_out;
112
+ std::tie(fan_in, fan_out) = init::_calculate_fan_in_and_fan_out(weight);
113
+ auto bound = 1 / std::sqrt(fan_in);
114
+ init::uniform_(bias, -bound, bound);
115
+ }
116
+ }
117
+
118
+ /// Pretty prints the `Conv{1,2,3}d` module into the given `stream`.
119
+ void pretty_print(std::ostream& stream) const override {
120
+ stream << "torch::nn::Conv" << D << "d"
121
+ << "(" << options.in_channels() << ", " << options.out_channels()
122
+ << ", kernel_size=" << options.kernel_size()
123
+ << ", stride=" << options.stride();
124
+ std::visit(
125
+ c10::overloaded(
126
+ [&](enumtype::kValid) { stream << ", padding='valid'"; },
127
+ [&](enumtype::kSame) { stream << ", padding='same'"; },
128
+ [&](const ExpandingArray<D>& pad) {
129
+ if (*pad != *ExpandingArray<D>(0)) {
130
+ stream << ", padding=" << pad;
131
+ }
132
+ }),
133
+ options.padding());
134
+ if (*options.dilation() != *ExpandingArray<D>(1)) {
135
+ stream << ", dilation=" << options.dilation();
136
+ }
137
+ if (*options.output_padding() != *ExpandingArray<D>(0)) {
138
+ stream << ", output_padding=" << options.output_padding();
139
+ }
140
+ if (options.groups() != 1) {
141
+ stream << ", groups=" << options.groups();
142
+ }
143
+ if (!options.bias()) {
144
+ stream << ", bias=" << std::boolalpha << false;
145
+ }
146
+ if (!std::get_if<enumtype::kZeros>(&options.padding_mode())) {
147
+ stream << ", padding_mode="
148
+ << enumtype::get_enum_name(options.padding_mode());
149
+ }
150
+ stream << ")";
151
+ }
152
+
153
+ /// The options with which this `Module` was constructed.
154
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
155
+ detail::ConvNdOptions<D> options;
156
+
157
+ /// The learned kernel (or "weight").
158
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
159
+ Tensor weight;
160
+
161
+ /// The learned bias. Only defined if the `bias` option was true.
162
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
163
+ Tensor bias;
164
+
165
+ protected:
166
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
167
+ std::vector<int64_t> _reversed_padding_repeated_twice;
168
+ };
169
+
170
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
171
+
172
+ /// Applies convolution over a 1-D input.
173
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Conv1d to learn about
174
+ /// the exact behavior of this module.
175
+ ///
176
+ /// See the documentation for `torch::nn::Conv1dOptions` class to learn what
177
+ /// constructor arguments are supported for this module.
178
+ ///
179
+ /// Example:
180
+ /// ```
181
+ /// Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false));
182
+ /// ```
183
+ class TORCH_API Conv1dImpl : public ConvNdImpl<1, Conv1dImpl> {
184
+ public:
185
+ Conv1dImpl(
186
+ int64_t input_channels,
187
+ int64_t output_channels,
188
+ ExpandingArray<1> kernel_size)
189
+ : Conv1dImpl(
190
+ Conv1dOptions(input_channels, output_channels, kernel_size)) {}
191
+ explicit Conv1dImpl(Conv1dOptions options_);
192
+ Tensor forward(const Tensor& input);
193
+ };
194
+
195
+ /// A `ModuleHolder` subclass for `Conv1dImpl`.
196
+ /// See the documentation for `Conv1dImpl` class to learn what methods it
197
+ /// provides, and examples of how to use `Conv1d` with
198
+ /// `torch::nn::Conv1dOptions`. See the documentation for `ModuleHolder` to
199
+ /// learn about PyTorch's module storage semantics.
200
+ TORCH_MODULE(Conv1d);
201
+
202
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
203
+
204
+ /// Applies convolution over a 2-D input.
205
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d to learn about
206
+ /// the exact behavior of this module.
207
+ ///
208
+ /// See the documentation for `torch::nn::Conv2dOptions` class to learn what
209
+ /// constructor arguments are supported for this module.
210
+ ///
211
+ /// Example:
212
+ /// ```
213
+ /// Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false));
214
+ /// ```
215
+ class TORCH_API Conv2dImpl : public ConvNdImpl<2, Conv2dImpl> {
216
+ public:
217
+ Conv2dImpl(
218
+ int64_t input_channels,
219
+ int64_t output_channels,
220
+ ExpandingArray<2> kernel_size)
221
+ : Conv2dImpl(
222
+ Conv2dOptions(input_channels, output_channels, kernel_size)) {}
223
+ explicit Conv2dImpl(Conv2dOptions options_);
224
+ Tensor forward(const Tensor& input);
225
+
226
+ protected:
227
+ Tensor _conv_forward(const Tensor& input, const Tensor& weight);
228
+ };
229
+
230
+ /// A `ModuleHolder` subclass for `Conv2dImpl`.
231
+ /// See the documentation for `Conv2dImpl` class to learn what methods it
232
+ /// provides, and examples of how to use `Conv2d` with
233
+ /// `torch::nn::Conv2dOptions`. See the documentation for `ModuleHolder` to
234
+ /// learn about PyTorch's module storage semantics.
235
+ TORCH_MODULE(Conv2d);
236
+
237
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
238
+
239
+ /// Applies convolution over a 3-D input.
240
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Conv3d to learn about
241
+ /// the exact behavior of this module.
242
+ ///
243
+ /// See the documentation for `torch::nn::Conv3dOptions` class to learn what
244
+ /// constructor arguments are supported for this module.
245
+ ///
246
+ /// Example:
247
+ /// ```
248
+ /// Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false));
249
+ /// ```
250
+ class TORCH_API Conv3dImpl : public ConvNdImpl<3, Conv3dImpl> {
251
+ public:
252
+ Conv3dImpl(
253
+ int64_t input_channels,
254
+ int64_t output_channels,
255
+ ExpandingArray<3> kernel_size)
256
+ : Conv3dImpl(
257
+ Conv3dOptions(input_channels, output_channels, kernel_size)) {}
258
+ explicit Conv3dImpl(Conv3dOptions options_);
259
+ Tensor forward(const Tensor& input);
260
+ };
261
+
262
+ /// A `ModuleHolder` subclass for `Conv3dImpl`.
263
+ /// See the documentation for `Conv3dImpl` class to learn what methods it
264
+ /// provides, and examples of how to use `Conv3d` with
265
+ /// `torch::nn::Conv3dOptions`. See the documentation for `ModuleHolder` to
266
+ /// learn about PyTorch's module storage semantics.
267
+ TORCH_MODULE(Conv3d);
268
+
269
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
270
+
271
+ /// Base class for all (dimension-specialized) convolution transpose modules.
272
+ template <size_t D, typename Derived>
273
+ class ConvTransposeNdImpl : public ConvNdImpl<D, Derived> {
274
+ public:
275
+ using torch::nn::ConvNdImpl<D, Derived>::ConvNdImpl;
276
+ explicit ConvTransposeNdImpl(detail::ConvNdOptions<D> options_)
277
+ : ConvNdImpl<D, Derived>(options_) {
278
+ TORCH_INTERNAL_ASSERT(
279
+ std::holds_alternative<ExpandingArray<D>>(this->options.padding()),
280
+ "ConvTranspose padding cannot be a string");
281
+ }
282
+
283
+ /// Pretty prints the `ConvTranspose{1,2,3}d` module into the given `stream`.
284
+ void pretty_print(std::ostream& stream) const override {
285
+ stream << "torch::nn::ConvTranspose" << D << "d"
286
+ << "(" << this->options.in_channels() << ", "
287
+ << this->options.out_channels()
288
+ << ", kernel_size=" << this->options.kernel_size()
289
+ << ", stride=" << this->options.stride();
290
+ const auto& pad = padding();
291
+ if (*pad != *ExpandingArray<D>(0)) {
292
+ stream << ", padding=" << pad;
293
+ }
294
+ if (*this->options.dilation() != *ExpandingArray<D>(1)) {
295
+ stream << ", dilation=" << this->options.dilation();
296
+ }
297
+ if (*this->options.output_padding() != *ExpandingArray<D>(0)) {
298
+ stream << ", output_padding=" << this->options.output_padding();
299
+ }
300
+ if (this->options.groups() != 1) {
301
+ stream << ", groups=" << this->options.groups();
302
+ }
303
+ if (!this->options.bias()) {
304
+ stream << ", bias=" << std::boolalpha << false;
305
+ }
306
+ if (!std::get_if<enumtype::kZeros>(&this->options.padding_mode())) {
307
+ stream << ", padding_mode="
308
+ << enumtype::get_enum_name(this->options.padding_mode());
309
+ }
310
+ stream << ")";
311
+ }
312
+
313
+ protected:
314
+ const ExpandingArray<D>& padding() const {
315
+ return std::get<ExpandingArray<D>>(this->options.padding());
316
+ }
317
+
318
+ std::vector<int64_t> _output_padding(
319
+ const Tensor& input,
320
+ const c10::optional<at::IntArrayRef>& output_size,
321
+ const ExpandingArray<D>& stride,
322
+ const ExpandingArray<D>& padding,
323
+ const ExpandingArray<D>& kernel_size);
324
+ };
325
+
326
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose1d
327
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
328
+
329
+ /// Applies the ConvTranspose1d function.
330
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ConvTranspose1d to
331
+ /// learn about the exact behavior of this module.
332
+ ///
333
+ /// See the documentation for `torch::nn::ConvTranspose1dOptions` class to learn
334
+ /// what constructor arguments are supported for this module.
335
+ ///
336
+ /// Example:
337
+ /// ```
338
+ /// ConvTranspose1d model(ConvTranspose1dOptions(3, 2,
339
+ /// 3).stride(1).bias(false));
340
+ /// ```
341
+ class TORCH_API ConvTranspose1dImpl
342
+ : public ConvTransposeNdImpl<1, ConvTranspose1dImpl> {
343
+ public:
344
+ ConvTranspose1dImpl(
345
+ int64_t input_channels,
346
+ int64_t output_channels,
347
+ ExpandingArray<1> kernel_size)
348
+ : ConvTranspose1dImpl(ConvTranspose1dOptions(
349
+ input_channels,
350
+ output_channels,
351
+ kernel_size)) {}
352
+ explicit ConvTranspose1dImpl(ConvTranspose1dOptions options_);
353
+ Tensor forward(
354
+ const Tensor& input,
355
+ const c10::optional<at::IntArrayRef>& output_size = c10::nullopt);
356
+
357
+ protected:
358
+ FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(c10::optional<at::IntArrayRef>())})
359
+ };
360
+
361
+ /// A `ModuleHolder` subclass for `ConvTranspose1dImpl`.
362
+ /// See the documentation for `ConvTranspose1dImpl` class to learn what methods
363
+ /// it provides, and examples of how to use `ConvTranspose1d` with
364
+ /// `torch::nn::ConvTranspose1dOptions`. See the documentation for
365
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
366
+ TORCH_MODULE(ConvTranspose1d);
367
+
368
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose2d
369
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
370
+
371
+ /// Applies the ConvTranspose2d function.
372
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ConvTranspose2d to
373
+ /// learn about the exact behavior of this module.
374
+ ///
375
+ /// See the documentation for `torch::nn::ConvTranspose2dOptions` class to learn
376
+ /// what constructor arguments are supported for this module.
377
+ ///
378
+ /// Example:
379
+ /// ```
380
+ /// ConvTranspose2d model(ConvTranspose2dOptions(3, 2,
381
+ /// 3).stride(1).bias(false));
382
+ /// ```
383
+ class TORCH_API ConvTranspose2dImpl
384
+ : public ConvTransposeNdImpl<2, ConvTranspose2dImpl> {
385
+ public:
386
+ ConvTranspose2dImpl(
387
+ int64_t input_channels,
388
+ int64_t output_channels,
389
+ ExpandingArray<2> kernel_size)
390
+ : ConvTranspose2dImpl(ConvTranspose2dOptions(
391
+ input_channels,
392
+ output_channels,
393
+ kernel_size)) {}
394
+ explicit ConvTranspose2dImpl(ConvTranspose2dOptions options_);
395
+ Tensor forward(
396
+ const Tensor& input,
397
+ const c10::optional<at::IntArrayRef>& output_size = c10::nullopt);
398
+
399
+ protected:
400
+ FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(c10::optional<at::IntArrayRef>())})
401
+ };
402
+
403
+ /// A `ModuleHolder` subclass for `ConvTranspose2dImpl`.
404
+ /// See the documentation for `ConvTranspose2dImpl` class to learn what methods
405
+ /// it provides, and examples of how to use `ConvTranspose2d` with
406
+ /// `torch::nn::ConvTranspose2dOptions`. See the documentation for
407
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
408
+ TORCH_MODULE(ConvTranspose2d);
409
+
410
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConvTranspose3d
411
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
412
+
413
+ /// Applies the ConvTranspose3d function.
414
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ConvTranspose3d to
415
+ /// learn about the exact behavior of this module.
416
+ ///
417
+ /// See the documentation for `torch::nn::ConvTranspose3dOptions` class to learn
418
+ /// what constructor arguments are supported for this module.
419
+ ///
420
+ /// Example:
421
+ /// ```
422
+ /// ConvTranspose3d model(ConvTranspose3dOptions(2, 2,
423
+ /// 2).stride(1).bias(false));
424
+ /// ```
425
+ class TORCH_API ConvTranspose3dImpl
426
+ : public ConvTransposeNdImpl<3, ConvTranspose3dImpl> {
427
+ public:
428
+ ConvTranspose3dImpl(
429
+ int64_t input_channels,
430
+ int64_t output_channels,
431
+ ExpandingArray<3> kernel_size)
432
+ : ConvTranspose3dImpl(ConvTranspose3dOptions(
433
+ input_channels,
434
+ output_channels,
435
+ kernel_size)) {}
436
+ explicit ConvTranspose3dImpl(ConvTranspose3dOptions options_);
437
+ Tensor forward(
438
+ const Tensor& input,
439
+ const c10::optional<at::IntArrayRef>& output_size = c10::nullopt);
440
+
441
+ protected:
442
+ FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(c10::optional<at::IntArrayRef>())})
443
+ };
444
+
445
+ /// A `ModuleHolder` subclass for `ConvTranspose3dImpl`.
446
+ /// See the documentation for `ConvTranspose3dImpl` class to learn what methods
447
+ /// it provides, and examples of how to use `ConvTranspose3d` with
448
+ /// `torch::nn::ConvTranspose3dOptions`. See the documentation for
449
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
450
+ TORCH_MODULE(ConvTranspose3d);
451
+
452
+ } // namespace nn
453
+ } // namespace torch
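(Illustrative sketch, not part of the uploaded header: a forward pass through `Conv2d` and `ConvTranspose2d` using the options classes referenced above, assuming a standard LibTorch installation.)

#include <torch/torch.h>
#include <iostream>

int main() {
  // 3 input channels, 16 output channels, 3x3 kernel; padding 1 keeps H and W.
  torch::nn::Conv2d conv(
      torch::nn::Conv2dOptions(3, 16, 3).stride(1).padding(1).bias(false));

  auto x = torch::randn({4, 3, 32, 32});  // NCHW
  auto y = conv->forward(x);
  std::cout << y.sizes() << "\n";         // [4, 16, 32, 32]

  // A transposed convolution with kernel 2 and stride 2 doubles the spatial size here.
  torch::nn::ConvTranspose2d up(
      torch::nn::ConvTranspose2dOptions(16, 3, 2).stride(2));
  std::cout << up->forward(y).sizes() << "\n";  // [4, 3, 64, 64]
}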
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/distance.h ADDED
@@ -0,0 +1,86 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/distance.h>
5
+ #include <torch/nn/options/distance.h>
6
+ #include <torch/nn/pimpl.h>
7
+ #include <torch/types.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+
11
+ namespace torch {
12
+ namespace nn {
13
+
14
+ /// Returns the cosine similarity between :math:`x_1` and :math:`x_2`, computed
15
+ /// along `dim`.
16
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.CosineSimilarity to
17
+ /// learn about the exact behavior of this module.
18
+ ///
19
+ /// See the documentation for `torch::nn::CosineSimilarityOptions` class to
20
+ /// learn what constructor arguments are supported for this module.
21
+ ///
22
+ /// Example:
23
+ /// ```
24
+ /// CosineSimilarity model(CosineSimilarityOptions().dim(0).eps(0.5));
25
+ /// ```
26
+ class TORCH_API CosineSimilarityImpl : public Cloneable<CosineSimilarityImpl> {
27
+ public:
28
+ explicit CosineSimilarityImpl(const CosineSimilarityOptions& options_ = {});
29
+
30
+ void reset() override;
31
+
32
+ /// Pretty prints the `CosineSimilarity` module into the given `stream`.
33
+ void pretty_print(std::ostream& stream) const override;
34
+
35
+ Tensor forward(const Tensor& input1, const Tensor& input2);
36
+
37
+ /// The options with which this `Module` was constructed.
38
+ CosineSimilarityOptions options;
39
+ };
40
+
41
+ /// A `ModuleHolder` subclass for `CosineSimilarityImpl`.
42
+ /// See the documentation for `CosineSimilarityImpl` class to learn what methods
43
+ /// it provides, and examples of how to use `CosineSimilarity` with
44
+ /// `torch::nn::CosineSimilarityOptions`. See the documentation for
45
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
46
+ TORCH_MODULE(CosineSimilarity);
47
+
48
+ // ============================================================================
49
+
50
+ /// Returns the batchwise pairwise distance between vectors :math:`v_1`,
51
+ /// :math:`v_2` using the p-norm.
52
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.PairwiseDistance to
53
+ /// learn about the exact behavior of this module.
54
+ ///
55
+ /// See the documentation for `torch::nn::PairwiseDistanceOptions` class to
56
+ /// learn what constructor arguments are supported for this module.
57
+ ///
58
+ /// Example:
59
+ /// ```
60
+ /// PairwiseDistance
61
+ /// model(PairwiseDistanceOptions().p(3).eps(0.5).keepdim(true));
62
+ /// ```
63
+ class TORCH_API PairwiseDistanceImpl : public Cloneable<PairwiseDistanceImpl> {
64
+ public:
65
+ explicit PairwiseDistanceImpl(const PairwiseDistanceOptions& options_ = {});
66
+
67
+ void reset() override;
68
+
69
+ /// Pretty prints the `PairwiseDistance` module into the given `stream`.
70
+ void pretty_print(std::ostream& stream) const override;
71
+
72
+ Tensor forward(const Tensor& input1, const Tensor& input2);
73
+
74
+ /// The options with which this `Module` was constructed.
75
+ PairwiseDistanceOptions options;
76
+ };
77
+
78
+ /// A `ModuleHolder` subclass for `PairwiseDistanceImpl`.
79
+ /// See the documentation for `PairwiseDistanceImpl` class to learn what methods
80
+ /// it provides, and examples of how to use `PairwiseDistance` with
81
+ /// `torch::nn::PairwiseDistanceOptions`. See the documentation for
82
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
83
+ TORCH_MODULE(PairwiseDistance);
84
+
85
+ } // namespace nn
86
+ } // namespace torch
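(Illustrative sketch, not part of the uploaded header: using `CosineSimilarity` and `PairwiseDistance` on batched vectors, assuming a standard LibTorch installation.)

#include <torch/torch.h>
#include <iostream>

int main() {
  auto a = torch::randn({8, 128});
  auto b = torch::randn({8, 128});

  // Cosine similarity along the feature dimension (dim 1) -> shape [8].
  torch::nn::CosineSimilarity cos(torch::nn::CosineSimilarityOptions().dim(1));
  std::cout << cos->forward(a, b).sizes() << "\n";

  // Batchwise L2 distance between corresponding rows -> shape [8].
  torch::nn::PairwiseDistance dist(torch::nn::PairwiseDistanceOptions().p(2));
  std::cout << dist->forward(a, b).sizes() << "\n";
}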
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/dropout.h ADDED
@@ -0,0 +1,190 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/options/dropout.h>
5
+ #include <torch/nn/pimpl.h>
6
+ #include <torch/types.h>
7
+
8
+ #include <torch/csrc/Export.h>
9
+
10
+ #include <cstddef>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace nn {
15
+
16
+ namespace detail {
17
+
18
+ template <typename Derived>
19
+ class _DropoutNd : public torch::nn::Cloneable<Derived> {
20
+ public:
21
+ _DropoutNd(double p) : _DropoutNd(DropoutOptions().p(p)){};
22
+
23
+ explicit _DropoutNd(const DropoutOptions& options_ = {}) : options(options_) {
24
+ // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
25
+ reset();
26
+ }
27
+
28
+ void reset() override {
29
+ TORCH_CHECK(
30
+ options.p() >= 0. && options.p() <= 1.,
31
+ "dropout probability has to be between 0 and 1, but got ",
32
+ options.p());
33
+ }
34
+
35
+ /// The options with which this `Module` was constructed.
36
+ DropoutOptions options;
37
+ };
38
+
39
+ } // namespace detail
40
+
41
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
42
+
43
+ /// Applies dropout over a 1-D input.
44
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Dropout to learn
45
+ /// about the exact behavior of this module.
46
+ ///
47
+ /// See the documentation for `torch::nn::DropoutOptions` class to learn what
48
+ /// constructor arguments are supported for this module.
49
+ ///
50
+ /// Example:
51
+ /// ```
52
+ /// Dropout model(DropoutOptions().p(0.42).inplace(true));
53
+ /// ```
54
+ class TORCH_API DropoutImpl : public detail::_DropoutNd<DropoutImpl> {
55
+ public:
56
+ using detail::_DropoutNd<DropoutImpl>::_DropoutNd;
57
+
58
+ Tensor forward(Tensor input);
59
+
60
+ /// Pretty prints the `Dropout` module into the given `stream`.
61
+ void pretty_print(std::ostream& stream) const override;
62
+ };
63
+
64
+ /// A `ModuleHolder` subclass for `DropoutImpl`.
65
+ /// See the documentation for `DropoutImpl` class to learn what methods it
66
+ /// provides, and examples of how to use `Dropout` with
67
+ /// `torch::nn::DropoutOptions`. See the documentation for `ModuleHolder` to
68
+ /// learn about PyTorch's module storage semantics.
69
+ TORCH_MODULE(Dropout);
70
+
71
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
72
+
73
+ /// Applies dropout over a 2-D input.
74
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Dropout2d to learn
75
+ /// about the exact behavior of this module.
76
+ ///
77
+ /// See the documentation for `torch::nn::Dropout2dOptions` class to learn what
78
+ /// constructor arguments are supported for this module.
79
+ ///
80
+ /// Example:
81
+ /// ```
82
+ /// Dropout2d model(Dropout2dOptions().p(0.42).inplace(true));
83
+ /// ```
84
+ class TORCH_API Dropout2dImpl : public detail::_DropoutNd<Dropout2dImpl> {
85
+ public:
86
+ using detail::_DropoutNd<Dropout2dImpl>::_DropoutNd;
87
+
88
+ Tensor forward(Tensor input);
89
+
90
+ /// Pretty prints the `Dropout2d` module into the given `stream`.
91
+ void pretty_print(std::ostream& stream) const override;
92
+ };
93
+
94
+ /// A `ModuleHolder` subclass for `Dropout2dImpl`.
95
+ /// See the documentation for `Dropout2dImpl` class to learn what methods it
96
+ /// provides, and examples of how to use `Dropout2d` with
97
+ /// `torch::nn::Dropout2dOptions`. See the documentation for `ModuleHolder` to
98
+ /// learn about PyTorch's module storage semantics.
99
+ TORCH_MODULE(Dropout2d);
100
+
101
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
102
+
103
+ /// Applies dropout over a 3-D input.
104
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Dropout3d to learn
105
+ /// about the exact behavior of this module.
106
+ ///
107
+ /// See the documentation for `torch::nn::Dropout3dOptions` class to learn what
108
+ /// constructor arguments are supported for this module.
109
+ ///
110
+ /// Example:
111
+ /// ```
112
+ /// Dropout3d model(Dropout3dOptions().p(0.42).inplace(true));
113
+ /// ```
114
+ class TORCH_API Dropout3dImpl : public detail::_DropoutNd<Dropout3dImpl> {
115
+ public:
116
+ using detail::_DropoutNd<Dropout3dImpl>::_DropoutNd;
117
+
118
+ Tensor forward(Tensor input);
119
+
120
+ /// Pretty prints the `Dropout3d` module into the given `stream`.
121
+ void pretty_print(std::ostream& stream) const override;
122
+ };
123
+
124
+ /// A `ModuleHolder` subclass for `Dropout3dImpl`.
125
+ /// See the documentation for `Dropout3dImpl` class to learn what methods it
126
+ /// provides, and examples of how to use `Dropout3d` with
127
+ /// `torch::nn::Dropout3dOptions`. See the documentation for `ModuleHolder` to
128
+ /// learn about PyTorch's module storage semantics.
129
+ TORCH_MODULE(Dropout3d);
130
+
131
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AlphaDropout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
132
+
133
+ /// Applies Alpha Dropout over the input.
134
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.AlphaDropout to learn
135
+ /// about the exact behavior of this module.
136
+ ///
137
+ /// See the documentation for `torch::nn::AlphaDropoutOptions` class to learn
138
+ /// what constructor arguments are supported for this module.
139
+ ///
140
+ /// Example:
141
+ /// ```
142
+ /// AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true));
143
+ /// ```
144
+ class TORCH_API AlphaDropoutImpl : public detail::_DropoutNd<AlphaDropoutImpl> {
145
+ public:
146
+ using detail::_DropoutNd<AlphaDropoutImpl>::_DropoutNd;
147
+
148
+ Tensor forward(const Tensor& input);
149
+
150
+ /// Pretty prints the `AlphaDropout` module into the given `stream`.
151
+ void pretty_print(std::ostream& stream) const override;
152
+ };
153
+
154
+ /// A `ModuleHolder` subclass for `AlphaDropoutImpl`.
155
+ /// See the documentation for `AlphaDropoutImpl` class to learn what methods it
156
+ /// provides, and examples of how to use `AlphaDropout` with
157
+ /// `torch::nn::AlphaDropoutOptions`. See the documentation for `ModuleHolder`
158
+ /// to learn about PyTorch's module storage semantics.
159
+ TORCH_MODULE(AlphaDropout);
160
+
161
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FeatureAlphaDropout
162
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
163
+
164
+ /// See the documentation for `torch::nn::FeatureAlphaDropoutOptions` class to
165
+ /// learn what constructor arguments are supported for this module.
166
+ ///
167
+ /// Example:
168
+ /// ```
169
+ /// FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true));
170
+ /// ```
171
+ class TORCH_API FeatureAlphaDropoutImpl
172
+ : public detail::_DropoutNd<FeatureAlphaDropoutImpl> {
173
+ public:
174
+ using detail::_DropoutNd<FeatureAlphaDropoutImpl>::_DropoutNd;
175
+
176
+ Tensor forward(const Tensor& input);
177
+
178
+ /// Pretty prints the `FeatureAlphaDropout` module into the given `stream`.
179
+ void pretty_print(std::ostream& stream) const override;
180
+ };
181
+
182
+ /// A `ModuleHolder` subclass for `FeatureAlphaDropoutImpl`.
183
+ /// See the documentation for `FeatureAlphaDropoutImpl` class to learn what
184
+ /// methods it provides, and examples of how to use `FeatureAlphaDropout` with
185
+ /// `torch::nn::FeatureAlphaDropoutOptions`. See the documentation for
186
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
187
+ TORCH_MODULE(FeatureAlphaDropout);
188
+
189
+ } // namespace nn
190
+ } // namespace torch
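(Illustrative sketch, not part of the uploaded header: `Dropout` behaves differently in training and evaluation mode, assuming a standard LibTorch installation.)

#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::Dropout drop(torch::nn::DropoutOptions().p(0.5));
  auto x = torch::ones({2, 4});

  drop->train();  // dropout is active: roughly half the entries are zeroed,
                  // the survivors are scaled by 1 / (1 - p) = 2.
  std::cout << drop->forward(x) << "\n";

  drop->eval();   // identity in evaluation mode
  std::cout << drop->forward(x) << "\n";
}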
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/embedding.h ADDED
@@ -0,0 +1,171 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/embedding.h>
5
+ #include <torch/nn/modules/common.h>
6
+ #include <torch/nn/options/embedding.h>
7
+ #include <torch/nn/pimpl.h>
8
+ #include <torch/types.h>
9
+
10
+ #include <cstddef>
11
+
12
+ namespace torch {
13
+ namespace nn {
14
+
15
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Embedding
16
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
17
+
18
+ /// Performs a lookup in a fixed size embedding table.
19
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Embedding to learn
20
+ /// about the exact behavior of this module.
21
+ ///
22
+ /// See the documentation for `torch::nn::EmbeddingOptions` class to learn what
23
+ /// constructor arguments are supported for this module.
24
+ ///
25
+ /// Example:
26
+ /// ```
27
+ /// Embedding model(EmbeddingOptions(10,
28
+ /// 2).padding_idx(3).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true));
29
+ /// ```
30
+ class TORCH_API EmbeddingImpl : public torch::nn::Cloneable<EmbeddingImpl> {
31
+ public:
32
+ EmbeddingImpl(int64_t num_embeddings, int64_t embedding_dim)
33
+ : EmbeddingImpl(EmbeddingOptions(num_embeddings, embedding_dim)) {}
34
+ explicit EmbeddingImpl(EmbeddingOptions options_);
35
+
36
+ void reset() override;
37
+
38
+ void reset_parameters();
39
+
40
+ /// Pretty prints the `Embedding` module into the given `stream`.
41
+ void pretty_print(std::ostream& stream) const override;
42
+
43
+ /// Performs a lookup on the embedding table stored in `weight` using the
44
+ /// `indices` supplied and returns the result.
45
+ Tensor forward(const Tensor& indices);
46
+
47
+ /// The `Options` used to configure this `Embedding` module.
48
+ /// Changes to `EmbeddingOptions` *after construction* have no effect.
49
+ EmbeddingOptions options;
50
+
51
+ /// The embedding table.
52
+ Tensor weight;
53
+ };
54
+
55
+ /// A `ModuleHolder` subclass for `EmbeddingImpl`.
56
+ /// See the documentation for `EmbeddingImpl` class to learn what methods it
57
+ /// provides, and examples of how to use `Embedding` with
58
+ /// `torch::nn::EmbeddingOptions`. See the documentation for `ModuleHolder` to
59
+ /// learn about PyTorch's module storage semantics.
60
+ class Embedding : public torch::nn::ModuleHolder<EmbeddingImpl> {
61
+ public:
62
+ using torch::nn::ModuleHolder<EmbeddingImpl>::ModuleHolder;
63
+
64
+ /// See the documentation for `torch::nn::EmbeddingFromPretrainedOptions`
65
+ /// class to learn what optional arguments are supported for this function.
66
+ static Embedding from_pretrained(
67
+ const torch::Tensor& embeddings,
68
+ const EmbeddingFromPretrainedOptions& options = {}) {
69
+ TORCH_CHECK(
70
+ embeddings.dim() == 2,
71
+ "Embeddings parameter is expected to be 2-dimensional");
72
+
73
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
74
+ int64_t rows, cols;
75
+ rows = embeddings.size(0);
76
+ cols = embeddings.size(1);
77
+
78
+ Embedding embedding(EmbeddingOptions(rows, cols)
79
+ ._weight(embeddings)
80
+ .padding_idx(options.padding_idx())
81
+ .max_norm(options.max_norm())
82
+ .norm_type(options.norm_type())
83
+ .scale_grad_by_freq(options.scale_grad_by_freq())
84
+ .sparse(options.sparse()));
85
+ embedding->weight.set_requires_grad(!options.freeze());
86
+ return embedding;
87
+ }
88
+ };
89
+
90
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ EmbeddingBag
91
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
92
+
93
+ /// Computes sums or means of 'bags' of embeddings, without instantiating the
94
+ /// intermediate embeddings.
95
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.EmbeddingBag to learn
96
+ /// about the exact behavior of this module.
97
+ ///
98
+ /// See the documentation for `torch::nn::EmbeddingBagOptions` class to learn
99
+ /// what constructor arguments are supported for this module.
100
+ ///
101
+ /// Example:
102
+ /// ```
103
+ /// EmbeddingBag model(EmbeddingBagOptions(10,
104
+ /// 2).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true).mode(torch::kSum).padding_idx(1));
105
+ /// ```
106
+ class TORCH_API EmbeddingBagImpl
107
+ : public torch::nn::Cloneable<EmbeddingBagImpl> {
108
+ public:
109
+ EmbeddingBagImpl(int64_t num_embeddings, int64_t embedding_dim)
110
+ : EmbeddingBagImpl(EmbeddingBagOptions(num_embeddings, embedding_dim)) {}
111
+ explicit EmbeddingBagImpl(EmbeddingBagOptions options_);
112
+
113
+ void reset() override;
114
+
115
+ void reset_parameters();
116
+
117
+ /// Pretty prints the `EmbeddingBag` module into the given `stream`.
118
+ void pretty_print(std::ostream& stream) const override;
119
+
120
+ /// The `Options` used to configure this `EmbeddingBag` module.
121
+ EmbeddingBagOptions options;
122
+ /// The embedding table.
123
+ Tensor weight;
124
+
125
+ Tensor forward(
126
+ const Tensor& input,
127
+ const Tensor& offsets = {},
128
+ const Tensor& per_sample_weights = {});
129
+
130
+ protected:
131
+ FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())}, {2, AnyValue(Tensor())})
132
+ };
133
+
134
+ /// A `ModuleHolder` subclass for `EmbeddingBagImpl`.
135
+ /// See the documentation for `EmbeddingBagImpl` class to learn what methods it
136
+ /// provides, and examples of how to use `EmbeddingBag` with
137
+ /// `torch::nn::EmbeddingBagOptions`. See the documentation for `ModuleHolder`
138
+ /// to learn about PyTorch's module storage semantics.
139
+ class EmbeddingBag : public torch::nn::ModuleHolder<EmbeddingBagImpl> {
140
+ public:
141
+ using torch::nn::ModuleHolder<EmbeddingBagImpl>::ModuleHolder;
142
+
143
+ /// See the documentation for `torch::nn::EmbeddingBagFromPretrainedOptions`
144
+ /// class to learn what optional arguments are supported for this function.
145
+ static EmbeddingBag from_pretrained(
146
+ const torch::Tensor& embeddings,
147
+ const EmbeddingBagFromPretrainedOptions& options = {}) {
148
+ TORCH_CHECK(
149
+ embeddings.dim() == 2,
150
+ "Embeddings parameter is expected to be 2-dimensional");
151
+
152
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
153
+ int64_t rows, cols;
154
+ rows = embeddings.size(0);
155
+ cols = embeddings.size(1);
156
+
157
+ EmbeddingBag embeddingbag(
158
+ EmbeddingBagOptions(rows, cols)
159
+ ._weight(embeddings)
160
+ .max_norm(options.max_norm())
161
+ .norm_type(options.norm_type())
162
+ .scale_grad_by_freq(options.scale_grad_by_freq())
163
+ .mode(options.mode())
164
+ .sparse(options.sparse())
165
+ .padding_idx(options.padding_idx()));
166
+ embeddingbag->weight.set_requires_grad(!options.freeze());
167
+ return embeddingbag;
168
+ }
169
+ };
170
+ } // namespace nn
171
+ } // namespace torch
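(Illustrative sketch, not part of the uploaded header: an `Embedding` built with `from_pretrained` and an `EmbeddingBag` that reduces bags of indices, assuming a standard LibTorch installation.)

#include <torch/torch.h>
#include <iostream>

int main() {
  // A frozen lookup table built from an existing 10x4 weight matrix.
  auto weights = torch::rand({10, 4});
  auto emb = torch::nn::Embedding::from_pretrained(weights);

  auto idx = torch::tensor({1, 3, 3, 7}, torch::kLong);
  std::cout << emb->forward(idx).sizes() << "\n";  // [4, 4]

  // EmbeddingBag reduces each "bag" of indices (sum mode here).
  torch::nn::EmbeddingBag bag(
      torch::nn::EmbeddingBagOptions(10, 4).mode(torch::kSum));
  auto offsets = torch::tensor({0, 2}, torch::kLong);  // bags: [1, 3] and [3, 7]
  std::cout << bag->forward(idx, offsets).sizes() << "\n";  // [2, 4]
}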
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/fold.h ADDED
@@ -0,0 +1,87 @@
+ #pragma once
+
+ #include <torch/expanding_array.h>
+ #include <torch/nn/cloneable.h>
+ #include <torch/nn/functional/fold.h>
+ #include <torch/nn/options/fold.h>
+ #include <torch/nn/pimpl.h>
+ #include <torch/types.h>
+
+ namespace torch {
+ namespace nn {
+
+ /// Applies fold over a 3-D input.
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Fold to learn about
+ /// the exact behavior of this module.
+ ///
+ /// See the documentation for `torch::nn::FoldOptions` class to learn what
+ /// constructor arguments are supported for this module.
+ ///
+ /// Example:
+ /// ```
+ /// Fold model(FoldOptions({8, 8}, {3, 3}).dilation(2).padding({2,
+ /// 1}).stride(2));
+ /// ```
+ class TORCH_API FoldImpl : public torch::nn::Cloneable<FoldImpl> {
+ public:
+ FoldImpl(ExpandingArray<2> output_size, ExpandingArray<2> kernel_size)
+ : FoldImpl(FoldOptions(output_size, kernel_size)) {}
+ explicit FoldImpl(const FoldOptions& options_);
+
+ void reset() override;
+
+ /// Pretty prints the `Fold` module into the given `stream`.
+ void pretty_print(std::ostream& stream) const override;
+
+ Tensor forward(const Tensor& input);
+
+ /// The options with which this `Module` was constructed.
+ FoldOptions options;
+ };
+
+ /// A `ModuleHolder` subclass for `FoldImpl`.
+ /// See the documentation for `FoldImpl` class to learn what methods it
+ /// provides, and examples of how to use `Fold` with `torch::nn::FoldOptions`.
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
+ /// module storage semantics.
+ TORCH_MODULE(Fold);
+
+ // ============================================================================
+
+ /// Applies unfold over a 4-D input.
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.Unfold to learn about
+ /// the exact behavior of this module.
+ ///
+ /// See the documentation for `torch::nn::UnfoldOptions` class to learn what
+ /// constructor arguments are supported for this module.
+ ///
+ /// Example:
+ /// ```
+ /// Unfold model(UnfoldOptions({2, 4}).dilation(2).padding({2, 1}).stride(2));
+ /// ```
+ class TORCH_API UnfoldImpl : public Cloneable<UnfoldImpl> {
+ public:
+ UnfoldImpl(ExpandingArray<2> kernel_size)
+ : UnfoldImpl(UnfoldOptions(kernel_size)) {}
+ explicit UnfoldImpl(const UnfoldOptions& options_);
+
+ void reset() override;
+
+ /// Pretty prints the `Unfold` module into the given `stream`.
+ void pretty_print(std::ostream& stream) const override;
+
+ Tensor forward(const Tensor& input);
+
+ /// The options with which this `Module` was constructed.
+ UnfoldOptions options;
+ };
+
+ /// A `ModuleHolder` subclass for `UnfoldImpl`.
+ /// See the documentation for `UnfoldImpl` class to learn what methods it
+ /// provides, and examples of how to use `Unfold` with
+ /// `torch::nn::UnfoldOptions`. See the documentation for `ModuleHolder` to
+ /// learn about PyTorch's module storage semantics.
+ TORCH_MODULE(Unfold);
+
+ } // namespace nn
+ } // namespace torch
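A hedged usage sketch for the `Fold`/`Unfold` modules declared above (illustrative only, not part of the uploaded header; the shapes are chosen to satisfy the documented input requirements):

```
#include <torch/torch.h>

int main() {
  // Unfold extracts sliding 2x3 patches from a (N, C, H, W) input.
  torch::nn::Unfold unfold(torch::nn::UnfoldOptions({2, 3}));
  auto patches = unfold(torch::randn({2, 5, 3, 4}));   // shape: [2, 30, 4]

  // Fold scatters patch columns back onto a 4x5 spatial output.
  torch::nn::Fold fold(torch::nn::FoldOptions({4, 5}, {2, 2}));
  auto out = fold(torch::randn({1, 3 * 2 * 2, 12}));   // shape: [1, 3, 4, 5]
}
```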
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/instancenorm.h ADDED
@@ -0,0 +1,153 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/modules/batchnorm.h>
4
+ #include <torch/nn/options/instancenorm.h>
5
+
6
+ namespace torch {
7
+ namespace nn {
8
+
9
+ /// Base class for all (dimension-specialized) instance norm modules
10
+ template <size_t D, typename Derived>
11
+ class InstanceNormImpl
12
+ : public torch::nn::NormImplBase<D, Derived, InstanceNormOptions> {
13
+ private:
14
+ inline Tensor apply_instance_norm(const Tensor& input) {
15
+ return torch::nn::functional::detail::instance_norm(
16
+ input,
17
+ this->running_mean,
18
+ this->running_var,
19
+ this->weight,
20
+ this->bias,
21
+ this->is_training() || !this->options.track_running_stats(),
22
+ this->options.momentum(),
23
+ this->options.eps());
24
+ }
25
+
26
+ inline Tensor handle_no_batch_input(const Tensor& input) {
27
+ return this->apply_instance_norm(input.unsqueeze(0)).squeeze(0);
28
+ }
29
+
30
+ public:
31
+ using torch::nn::NormImplBase<D, Derived, InstanceNormOptions>::NormImplBase;
32
+
33
+ Tensor forward(const Tensor& input) {
34
+ this->_check_input_dim(input);
35
+
36
+ // For InstanceNorm1D, 2D is unbatched and 3D is batched
37
+ // For InstanceNorm2D, 3D is unbatched and 4D is batched
38
+ // For InstanceNorm3D, 4D is unbatched and 5D is batched
39
+ // check if input does not have a batch-dim
40
+ if (input.dim() == D + 1) {
41
+ return this->handle_no_batch_input(input);
42
+ }
43
+
44
+ return this->apply_instance_norm(input);
45
+ }
46
+
47
+ /// Pretty prints the `InstanceNorm{1,2,3}d` module into the given `stream`.
48
+ void pretty_print(std::ostream& stream) const override {
49
+ stream << std::boolalpha << "torch::nn::InstanceNorm" << D << "d("
50
+ << this->options.num_features() << ", "
51
+ << "eps=" << this->options.eps() << ", "
52
+ << "momentum=" << this->options.momentum() << ", "
53
+ << "affine=" << this->options.affine() << ", "
54
+ << "track_running_stats=" << this->options.track_running_stats()
55
+ << ")";
56
+ }
57
+ };
58
+
59
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm1d
60
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61
+
62
+ /// Applies the InstanceNorm1d function.
63
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.InstanceNorm1d to learn
64
+ /// about the exact behavior of this module.
65
+ ///
66
+ /// See the documentation for `torch::nn::InstanceNorm1dOptions` class to learn
67
+ /// what constructor arguments are supported for this module.
68
+ ///
69
+ /// Example:
70
+ /// ```
71
+ /// InstanceNorm1d
72
+ /// model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
73
+ /// ```
74
+ class TORCH_API InstanceNorm1dImpl
75
+ : public InstanceNormImpl<1, InstanceNorm1dImpl> {
76
+ protected:
77
+ void _check_input_dim(const Tensor& input) override;
78
+
79
+ public:
80
+ using InstanceNormImpl<1, InstanceNorm1dImpl>::InstanceNormImpl;
81
+ };
82
+
83
+ /// A `ModuleHolder` subclass for `InstanceNorm1dImpl`.
84
+ /// See the documentation for `InstanceNorm1dImpl` class to learn what methods
85
+ /// it provides, and examples of how to use `InstanceNorm1d` with
86
+ /// `torch::nn::InstanceNorm1dOptions`. See the documentation for `ModuleHolder`
87
+ /// to learn about PyTorch's module storage semantics.
88
+ TORCH_MODULE(InstanceNorm1d);
89
+
90
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm2d
91
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
92
+
93
+ /// Applies the InstanceNorm2d function.
94
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.InstanceNorm2d to learn
95
+ /// about the exact behavior of this module.
96
+ ///
97
+ /// See the documentation for `torch::nn::InstanceNorm2dOptions` class to learn
98
+ /// what constructor arguments are supported for this module.
99
+ ///
100
+ /// Example:
101
+ /// ```
102
+ /// InstanceNorm2d
103
+ /// model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
104
+ /// ```
105
+ class TORCH_API InstanceNorm2dImpl
106
+ : public InstanceNormImpl<2, InstanceNorm2dImpl> {
107
+ protected:
108
+ void _check_input_dim(const Tensor& input) override;
109
+
110
+ public:
111
+ using InstanceNormImpl<2, InstanceNorm2dImpl>::InstanceNormImpl;
112
+ };
113
+
114
+ /// A `ModuleHolder` subclass for `InstanceNorm2dImpl`.
115
+ /// See the documentation for `InstanceNorm2dImpl` class to learn what methods
116
+ /// it provides, and examples of how to use `InstanceNorm2d` with
117
+ /// `torch::nn::InstanceNorm2dOptions`. See the documentation for `ModuleHolder`
118
+ /// to learn about PyTorch's module storage semantics.
119
+ TORCH_MODULE(InstanceNorm2d);
120
+
121
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm3d
122
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
123
+
124
+ /// Applies the InstanceNorm3d function.
125
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.InstanceNorm3d to learn
126
+ /// about the exact behavior of this module.
127
+ ///
128
+ /// See the documentation for `torch::nn::InstanceNorm3dOptions` class to learn
129
+ /// what constructor arguments are supported for this module.
130
+ ///
131
+ /// Example:
132
+ /// ```
133
+ /// InstanceNorm3d
134
+ /// model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
135
+ /// ```
136
+ class TORCH_API InstanceNorm3dImpl
137
+ : public InstanceNormImpl<3, InstanceNorm3dImpl> {
138
+ protected:
139
+ void _check_input_dim(const Tensor& input) override;
140
+
141
+ public:
142
+ using InstanceNormImpl<3, InstanceNorm3dImpl>::InstanceNormImpl;
143
+ };
144
+
145
+ /// A `ModuleHolder` subclass for `InstanceNorm3dImpl`.
146
+ /// See the documentation for `InstanceNorm3dImpl` class to learn what methods
147
+ /// it provides, and examples of how to use `InstanceNorm3d` with
148
+ /// `torch::nn::InstanceNorm3dOptions`. See the documentation for `ModuleHolder`
149
+ /// to learn about PyTorch's module storage semantics.
150
+ TORCH_MODULE(InstanceNorm3d);
151
+
152
+ } // namespace nn
153
+ } // namespace torch
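A short sketch of how the batched/unbatched dispatch in `InstanceNormImpl::forward` plays out for `InstanceNorm2d` (illustrative only, not part of the uploaded header):

```
#include <torch/torch.h>

int main() {
  torch::nn::InstanceNorm2d norm(
      torch::nn::InstanceNorm2dOptions(3).affine(true).track_running_stats(false));

  // Batched (N, C, H, W) input goes straight to apply_instance_norm.
  auto batched = norm(torch::randn({4, 3, 8, 8}));   // shape: [4, 3, 8, 8]

  // Unbatched (C, H, W) input (dim == D + 1) is unsqueezed and re-squeezed.
  auto single = norm(torch::randn({3, 8, 8}));       // shape: [3, 8, 8]
}
```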
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/linear.h ADDED
@@ -0,0 +1,214 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/linear.h>
5
+ #include <torch/nn/module.h>
6
+ #include <torch/nn/options/linear.h>
7
+ #include <torch/nn/pimpl.h>
8
+ #include <torch/types.h>
9
+
10
+ #include <cstddef>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace nn {
15
+
16
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Identity ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
17
+
18
+ /// A placeholder identity operator that is argument-insensitive.
19
+ /// See https://pytorch.org/docs/master/generated/torch.nn.Identity.html to
20
+ /// learn about the exact behavior of this module.
21
+ class TORCH_API IdentityImpl : public Cloneable<IdentityImpl> {
22
+ public:
23
+ void reset() override;
24
+
25
+ /// Pretty prints the `Identity` module into the given `stream`.
26
+ void pretty_print(std::ostream& stream) const override;
27
+
28
+ Tensor forward(const Tensor& input);
29
+ };
30
+
31
+ /// A `ModuleHolder` subclass for `IdentityImpl`.
32
+ /// See the documentation for `IdentityImpl` class to learn what methods it
33
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
34
+ /// module storage semantics.
35
+ TORCH_MODULE(Identity);
36
+
37
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Linear ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
38
+
39
+ /// Applies a linear transformation with optional bias.
40
+ /// See https://pytorch.org/docs/master/generated/torch.nn.Linear.html to learn
41
+ /// about the exact behavior of this module.
42
+ ///
43
+ /// See the documentation for `torch::nn::LinearOptions` class to learn what
44
+ /// constructor arguments are supported for this module.
45
+ ///
46
+ /// Example:
47
+ /// ```
48
+ /// Linear model(LinearOptions(5, 2).bias(false));
49
+ /// ```
50
+ class TORCH_API LinearImpl : public Cloneable<LinearImpl> {
51
+ public:
52
+ LinearImpl(int64_t in_features, int64_t out_features)
53
+ : LinearImpl(LinearOptions(in_features, out_features)) {}
54
+ explicit LinearImpl(const LinearOptions& options_);
55
+
56
+ void reset() override;
57
+
58
+ void reset_parameters();
59
+
60
+ /// Pretty prints the `Linear` module into the given `stream`.
61
+ void pretty_print(std::ostream& stream) const override;
62
+
63
+ /// Transforms the `input` tensor by multiplying with the `weight` and
64
+ /// optionally adding the `bias`, if `bias` is true in the options.
65
+ Tensor forward(const Tensor& input);
66
+
67
+ /// The options used to configure this module.
68
+ LinearOptions options;
69
+
70
+ /// The learned weight.
71
+ Tensor weight;
72
+
73
+ /// The learned bias. If `bias` is false in the `options`, this tensor is
74
+ /// undefined.
75
+ Tensor bias;
76
+ };
77
+
78
+ /// A `ModuleHolder` subclass for `LinearImpl`.
79
+ /// See the documentation for `LinearImpl` class to learn what methods it
80
+ /// provides, and examples of how to use `Linear` with
81
+ /// `torch::nn::LinearOptions`. See the documentation for `ModuleHolder` to
82
+ /// learn about PyTorch's module storage semantics.
83
+ TORCH_MODULE(Linear);
84
+
85
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Flatten ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
86
+
87
+ /// A placeholder for Flatten operator
88
+ /// See https://pytorch.org/docs/master/generated/torch.nn.Flatten.html to learn
89
+ /// about the exact behavior of this module.
90
+ ///
91
+ /// See the documentation for `torch::nn::FlattenOptions` class to learn what
92
+ /// constructor arguments are supported for this module.
93
+ ///
94
+ /// Example:
95
+ /// ```
96
+ /// Flatten model(FlattenOptions().start_dim(2).end_dim(4));
97
+ /// ```
98
+ class TORCH_API FlattenImpl : public Cloneable<FlattenImpl> {
99
+ public:
100
+ explicit FlattenImpl(const FlattenOptions& options_ = {});
101
+
102
+ void reset() override;
103
+
104
+ /// Pretty prints the `Flatten` module into the given `stream`.
105
+ void pretty_print(std::ostream& stream) const override;
106
+
107
+ /// Applies a flatten transform on the `input`.
108
+ Tensor forward(const Tensor& input);
109
+
110
+ /// The options used to configure this module.
111
+ FlattenOptions options;
112
+ };
113
+
114
+ /// A `ModuleHolder` subclass for `FlattenImpl`.
115
+ /// See the documentation for `FlattenImpl` class to learn what methods it
116
+ /// provides, and examples of how to use `Flatten` with
117
+ /// `torch::nn::FlattenOptions`. See the documentation for `ModuleHolder` to
118
+ /// learn about PyTorch's module storage semantics.
119
+ TORCH_MODULE(Flatten);
120
+
121
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Unflatten
122
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
123
+
124
+ /// A placeholder for unflatten operator
125
+ /// See https://pytorch.org/docs/master/generated/torch.nn.Unflatten.html to
126
+ /// learn about the exact behavior of this module.
127
+ ///
128
+ /// See the documentation for `torch::nn::UnflattenOptions` class to learn what
129
+ /// constructor arguments are supported for this module.
130
+ ///
131
+ /// Example:
132
+ /// ```
133
+ /// Unflatten model(UnflattenOptions(0, {2, 2}));
134
+ /// Unflatten model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}}));
135
+ /// ```
136
+ class TORCH_API UnflattenImpl : public Cloneable<UnflattenImpl> {
137
+ public:
138
+ UnflattenImpl(int64_t dim, std::vector<int64_t> sizes)
139
+ : UnflattenImpl(UnflattenOptions(dim, sizes)) {}
140
+ UnflattenImpl(std::string dimname, UnflattenOptions::namedshape_t namedshape)
141
+ : UnflattenImpl(UnflattenOptions(dimname, namedshape)) {}
142
+ explicit UnflattenImpl(UnflattenOptions options_);
143
+
144
+ void reset() override;
145
+
146
+ /// Pretty prints the `Unflatten` module into the given `stream`.
147
+ void pretty_print(std::ostream& stream) const override;
148
+
149
+ /// Applies an unflatten transform on the `input`.
150
+ Tensor forward(const Tensor& input);
151
+
152
+ /// The options used to configure this module.
153
+ UnflattenOptions options;
154
+ };
155
+
156
+ /// A `ModuleHolder` subclass for `UnflattenImpl`.
157
+ /// See the documentation for `UnflattenImpl` class to learn what methods it
158
+ /// provides, and examples of how to use `Unflatten` with
159
+ /// `torch::nn::UnflattenOptions`. See the documentation for `ModuleHolder` to
160
+ /// learn about PyTorch's module storage semantics.
161
+ TORCH_MODULE(Unflatten);
162
+
163
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Bilinear ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
164
+
165
+ /// Applies a bilinear transformation with optional bias.
166
+ /// See https://pytorch.org/docs/master/generated/torch.nn.Bilinear.html to
167
+ /// learn about the exact behavior of this module.
168
+ ///
169
+ /// See the documentation for `torch::nn::BilinearOptions` class to learn what
170
+ /// constructor arguments are supported for this module.
171
+ ///
172
+ /// Example:
173
+ /// ```
174
+ /// Bilinear model(BilinearOptions(3, 2, 4).bias(false));
175
+ /// ```
176
+ class TORCH_API BilinearImpl : public Cloneable<BilinearImpl> {
177
+ public:
178
+ BilinearImpl(int64_t in1_features, int64_t in2_features, int64_t out_features)
179
+ : BilinearImpl(
180
+ BilinearOptions(in1_features, in2_features, out_features)) {}
181
+ explicit BilinearImpl(const BilinearOptions& options_);
182
+
183
+ void reset() override;
184
+
185
+ void reset_parameters();
186
+
187
+ /// Pretty prints the `Bilinear` module into the given `stream`.
188
+ void pretty_print(std::ostream& stream) const override;
189
+
190
+ /// Applies a bilinear transform on the `input1` and `input2` tensor by
191
+ /// multiplying with the `weight` and optionally adding the `bias`, if
192
+ /// `bias` is true in the options.
193
+ Tensor forward(const Tensor& input1, const Tensor& input2);
194
+
195
+ /// The options used to configure this module.
196
+ BilinearOptions options;
197
+
198
+ /// The learned weight.
199
+ Tensor weight;
200
+
201
+ /// The learned bias. If `with_bias` is false in the `options`, this tensor is
202
+ /// undefined.
203
+ Tensor bias;
204
+ };
205
+
206
+ /// A `ModuleHolder` subclass for `BilinearImpl`.
207
+ /// See the documentation for `BilinearImpl` class to learn what methods it
208
+ /// provides, and examples of how to use `Bilinear` with
209
+ /// `torch::nn::BilinearOptions`. See the documentation for `ModuleHolder` to
210
+ /// learn about PyTorch's module storage semantics.
211
+ TORCH_MODULE(Bilinear);
212
+
213
+ } // namespace nn
214
+ } // namespace torch
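A minimal sketch exercising the modules declared above (illustrative only, not part of the uploaded header; the shapes are arbitrary examples):

```
#include <torch/torch.h>

int main() {
  torch::nn::Linear fc(torch::nn::LinearOptions(5, 2).bias(false));
  auto y = fc(torch::randn({4, 5}));                        // shape: [4, 2]

  torch::nn::Flatten flatten(torch::nn::FlattenOptions().start_dim(1));
  auto flat = flatten(torch::randn({4, 2, 3}));             // shape: [4, 6]

  torch::nn::Bilinear bl(torch::nn::BilinearOptions(3, 2, 4));
  auto z = bl(torch::randn({8, 3}), torch::randn({8, 2}));  // shape: [8, 4]
}
```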
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/loss.h ADDED
@@ -0,0 +1,805 @@
1
+ #pragma once
2
+
3
+ #include <torch/expanding_array.h>
4
+ #include <torch/nn/cloneable.h>
5
+ #include <torch/nn/functional/loss.h>
6
+ #include <torch/nn/options/loss.h>
7
+ #include <torch/nn/pimpl.h>
8
+ #include <torch/types.h>
9
+
10
+ #include <torch/csrc/Export.h>
11
+
12
+ #include <cstddef>
13
+ #include <vector>
14
+
15
+ namespace torch {
16
+ namespace nn {
17
+
18
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ L1Loss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19
+
20
+ /// Creates a criterion that measures the mean absolute error (MAE) between each
21
+ /// element in the input :math:`x` and target :math:`y`.
22
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.L1Loss to learn
23
+ /// about the exact behavior of this module.
24
+ ///
25
+ /// See the documentation for `torch::nn::L1LossOptions` class to learn what
26
+ /// constructor arguments are supported for this module.
27
+ ///
28
+ /// Example:
29
+ /// ```
30
+ /// L1Loss model(L1LossOptions(torch::kNone));
31
+ /// ```
32
+ struct TORCH_API L1LossImpl : Cloneable<L1LossImpl> {
33
+ explicit L1LossImpl(L1LossOptions options_ = {});
34
+
35
+ void reset() override;
36
+
37
+ /// Pretty prints the `L1Loss` module into the given `stream`.
38
+ void pretty_print(std::ostream& stream) const override;
39
+
40
+ Tensor forward(const Tensor& input, const Tensor& target);
41
+
42
+ /// The options with which this `Module` was constructed.
43
+ L1LossOptions options;
44
+ };
45
+
46
+ /// A `ModuleHolder` subclass for `L1LossImpl`.
47
+ /// See the documentation for `L1LossImpl` class to learn what methods it
48
+ /// provides, and examples of how to use `L1Loss` with
49
+ /// `torch::nn::L1LossOptions`. See the documentation for `ModuleHolder` to
50
+ /// learn about PyTorch's module storage semantics.
51
+ TORCH_MODULE(L1Loss);
52
+
53
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ KLDivLoss
54
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
55
+
56
+ /// The Kullback-Leibler divergence loss measure
57
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.KLDivLoss to learn
58
+ /// about the exact behavior of this module.
59
+ ///
60
+ /// See the documentation for `torch::nn::KLDivLossOptions` class to learn what
61
+ /// constructor arguments are supported for this module.
62
+ ///
63
+ /// Example:
64
+ /// ```
65
+ /// KLDivLoss model(KLDivLossOptions().reduction(torch::kNone));
66
+ /// ```
67
+ struct TORCH_API KLDivLossImpl : Cloneable<KLDivLossImpl> {
68
+ explicit KLDivLossImpl(KLDivLossOptions options_ = {});
69
+
70
+ void reset() override;
71
+
72
+ /// Pretty prints the `KLDivLoss` module into the given `stream`.
73
+ void pretty_print(std::ostream& stream) const override;
74
+
75
+ Tensor forward(const Tensor& input, const Tensor& target);
76
+
77
+ /// The options with which this `Module` was constructed.
78
+ KLDivLossOptions options;
79
+ };
80
+
81
+ /// A `ModuleHolder` subclass for `KLDivLossImpl`.
82
+ /// See the documentation for `KLDivLossImpl` class to learn what methods it
83
+ /// provides, and examples of how to use `KLDivLoss` with
84
+ /// `torch::nn::KLDivLossOptions`. See the documentation for `ModuleHolder` to
85
+ /// learn about PyTorch's module storage semantics.
86
+ TORCH_MODULE(KLDivLoss);
87
+
88
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MSELoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
89
+
90
+ /// Creates a criterion that measures the mean squared error (squared L2 norm)
91
+ /// between each element in the input :math:`x` and target :math:`y`.
92
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.MSELoss to learn
93
+ /// about the exact behavior of this module.
94
+ ///
95
+ /// See the documentation for `torch::nn::MSELossOptions` class to learn what
96
+ /// constructor arguments are supported for this module.
97
+ ///
98
+ /// Example:
99
+ /// ```
100
+ /// MSELoss model(MSELossOptions(torch::kNone));
101
+ /// ```
102
+ struct TORCH_API MSELossImpl : Cloneable<MSELossImpl> {
103
+ explicit MSELossImpl(MSELossOptions options_ = {});
104
+
105
+ void reset() override;
106
+
107
+ /// Pretty prints the `MSELoss` module into the given `stream`.
108
+ void pretty_print(std::ostream& stream) const override;
109
+
110
+ Tensor forward(const Tensor& input, const Tensor& target);
111
+
112
+ /// The options with which this `Module` was constructed.
113
+ MSELossOptions options;
114
+ };
115
+
116
+ /// A `ModuleHolder` subclass for `MSELossImpl`.
117
+ /// See the documentation for `MSELossImpl` class to learn what methods it
118
+ /// provides, and examples of how to use `MSELoss` with
119
+ /// `torch::nn::MSELossOptions`. See the documentation for `ModuleHolder` to
120
+ /// learn about PyTorch's module storage semantics.
121
+ TORCH_MODULE(MSELoss);
122
+
123
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BCELoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
124
+
125
+ /// Creates a criterion that measures the Binary Cross Entropy
126
+ /// between the target and the output.
127
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.BCELoss to learn
128
+ /// about the exact behavior of this module.
129
+ ///
130
+ /// See the documentation for `torch::nn::BCELossOptions` class to learn what
131
+ /// constructor arguments are supported for this module.
132
+ ///
133
+ /// Example:
134
+ /// ```
135
+ /// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight));
136
+ /// ```
137
+ struct TORCH_API BCELossImpl : Cloneable<BCELossImpl> {
138
+ explicit BCELossImpl(BCELossOptions options_ = {});
139
+
140
+ void reset() override;
141
+
142
+ /// Pretty prints the `BCELoss` module into the given `stream`.
143
+ void pretty_print(std::ostream& stream) const override;
144
+
145
+ Tensor forward(const Tensor& input, const Tensor& target);
146
+
147
+ /// The options with which this `Module` was constructed.
148
+ BCELossOptions options;
149
+ };
150
+
151
+ /// A `ModuleHolder` subclass for `BCELossImpl`.
152
+ /// See the documentation for `BCELossImpl` class to learn what methods it
153
+ /// provides, and examples of how to use `BCELoss` with
154
+ /// `torch::nn::BCELossOptions`. See the documentation for `ModuleHolder` to
155
+ /// learn about PyTorch's module storage semantics.
156
+ TORCH_MODULE(BCELoss);
157
+
158
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HingeEmbeddingLoss
159
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
160
+
161
+ /// Creates a criterion that measures the loss given an input tensor :math:`x`
162
+ /// and a labels tensor :math:`y` (containing 1 or -1).
163
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.HingeEmbeddingLoss to
164
+ /// learn about the exact behavior of this module.
165
+ ///
166
+ /// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to
167
+ /// learn what constructor arguments are supported for this module.
168
+ ///
169
+ /// Example:
170
+ /// ```
171
+ /// HingeEmbeddingLoss
172
+ /// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone));
173
+ /// ```
174
+ struct TORCH_API HingeEmbeddingLossImpl : Cloneable<HingeEmbeddingLossImpl> {
175
+ explicit HingeEmbeddingLossImpl(HingeEmbeddingLossOptions options_ = {});
176
+
177
+ void reset() override;
178
+
179
+ /// Pretty prints the `HingeEmbeddingLoss` module into the given `stream`.
180
+ void pretty_print(std::ostream& stream) const override;
181
+
182
+ Tensor forward(const Tensor& input, const Tensor& target);
183
+
184
+ /// The options with which this `Module` was constructed.
185
+ HingeEmbeddingLossOptions options;
186
+ };
187
+
188
+ /// A `ModuleHolder` subclass for `HingeEmbeddingLossImpl`.
189
+ /// See the documentation for `HingeEmbeddingLossImpl` class to learn what
190
+ /// methods it provides, and examples of how to use `HingeEmbeddingLoss` with
191
+ /// `torch::nn::HingeEmbeddingLossOptions`. See the documentation for
192
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
193
+ TORCH_MODULE(HingeEmbeddingLoss);
194
+
195
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiMarginLoss
196
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
197
+
198
+ /// Creates a criterion that optimizes a multi-class classification hinge
199
+ /// loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`)
200
+ /// and output :math:`y` (which is a 1D tensor of target class indices, :math:`0
201
+ /// \leq y \leq \text{x.size}(1)-1`). See
202
+ /// https://pytorch.org/docs/master/nn.html#torch.nn.MultiMarginLoss to learn
203
+ /// about the exact behavior of this module.
204
+ ///
205
+ /// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn
206
+ /// what constructor arguments are supported for this module.
207
+ ///
208
+ /// Example:
209
+ /// ```
210
+ /// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight));
211
+ /// ```
212
+ struct TORCH_API MultiMarginLossImpl : public Cloneable<MultiMarginLossImpl> {
213
+ explicit MultiMarginLossImpl(MultiMarginLossOptions options_ = {});
214
+
215
+ void reset() override;
216
+
217
+ /// Pretty prints the `MultiMarginLoss` module into the given `stream`.
218
+ void pretty_print(std::ostream& stream) const override;
219
+
220
+ Tensor forward(const Tensor& input, const Tensor& target);
221
+
222
+ /// The options with which this `Module` was constructed.
223
+ MultiMarginLossOptions options;
224
+ };
225
+
226
+ /// A `ModuleHolder` subclass for `MultiMarginLossImpl`.
227
+ /// See the documentation for `MultiMarginLossImpl` class to learn what methods
228
+ /// it provides, and examples of how to use `MultiMarginLoss` with
229
+ /// `torch::nn::MultiMarginLossOptions`. See the documentation for
230
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
231
+ TORCH_MODULE(MultiMarginLoss);
232
+
233
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CosineEmbeddingLoss
234
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
235
+
236
+ /// Creates a criterion that measures the loss given input tensors
237
+ /// `input1`, `input2`, and a `Tensor` label `target` with values 1 or
238
+ /// -1. This is used for measuring whether two inputs are similar or
239
+ /// dissimilar, using the cosine distance, and is typically used for learning
240
+ /// nonlinear embeddings or semi-supervised learning.
241
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.CosineEmbeddingLoss to
242
+ /// learn about the exact behavior of this module.
243
+ ///
244
+ /// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to
245
+ /// learn what constructor arguments are supported for this module.
246
+ ///
247
+ /// Example:
248
+ /// ```
249
+ /// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5));
250
+ /// ```
251
+ struct TORCH_API CosineEmbeddingLossImpl
252
+ : public Cloneable<CosineEmbeddingLossImpl> {
253
+ explicit CosineEmbeddingLossImpl(CosineEmbeddingLossOptions options_ = {});
254
+
255
+ void reset() override;
256
+
257
+ /// Pretty prints the `CosineEmbeddingLoss` module into the given `stream`.
258
+ void pretty_print(std::ostream& stream) const override;
259
+
260
+ Tensor forward(
261
+ const Tensor& input1,
262
+ const Tensor& input2,
263
+ const Tensor& target);
264
+
265
+ /// The options with which this `Module` was constructed.
266
+ CosineEmbeddingLossOptions options;
267
+ };
268
+
269
+ /// A `ModuleHolder` subclass for `CosineEmbeddingLossImpl`.
270
+ /// See the documentation for `CosineEmbeddingLossImpl` class to learn what
271
+ /// methods it provides, and examples of how to use `CosineEmbeddingLoss` with
272
+ /// `torch::nn::CosineEmbeddingLossOptions`. See the documentation for
273
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
274
+ TORCH_MODULE(CosineEmbeddingLoss);
275
+
276
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SmoothL1Loss
277
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
278
+
279
+ /// Creates a criterion that uses a squared term if the absolute
280
+ /// element-wise error falls below beta and an L1 term otherwise.
281
+ /// It is less sensitive to outliers than the `MSELoss` and in some cases
282
+ /// prevents exploding gradients (e.g. see the paper `Fast R-CNN` by Ross
283
+ /// Girshick). See https://pytorch.org/docs/master/nn.html#torch.nn.SmoothL1Loss
284
+ /// to learn about the exact behavior of this module.
285
+ ///
286
+ /// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn
287
+ /// what constructor arguments are supported for this module.
288
+ ///
289
+ /// Example:
290
+ /// ```
291
+ /// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5));
292
+ /// ```
293
+ struct TORCH_API SmoothL1LossImpl : public Cloneable<SmoothL1LossImpl> {
294
+ explicit SmoothL1LossImpl(SmoothL1LossOptions options_ = {});
295
+
296
+ void reset() override;
297
+
298
+ /// Pretty prints the `SmoothL1Loss` module into the given `stream`.
299
+ void pretty_print(std::ostream& stream) const override;
300
+
301
+ Tensor forward(const Tensor& input, const Tensor& target);
302
+
303
+ /// The options with which this `Module` was constructed.
304
+ SmoothL1LossOptions options;
305
+ };
306
+
307
+ /// A `ModuleHolder` subclass for `SmoothL1LossImpl`.
308
+ /// See the documentation for `SmoothL1LossImpl` class to learn what methods it
309
+ /// provides, and examples of how to use `SmoothL1Loss` with
310
+ /// `torch::nn::SmoothL1LossOptions`. See the documentation for `ModuleHolder`
311
+ /// to learn about PyTorch's module storage semantics.
312
+ TORCH_MODULE(SmoothL1Loss);
313
+
314
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HuberLoss
315
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
316
+
317
+ /// Creates a criterion that uses a squared term if the absolute
318
+ /// element-wise error falls below delta and a delta-scaled L1 term otherwise.
319
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.HuberLoss to learn
320
+ /// about the exact behavior of this module.
321
+ ///
322
+ /// See the documentation for `torch::nn::HuberLossOptions` class to learn what
323
+ /// constructor arguments are supported for this module.
324
+ ///
325
+ /// Example:
326
+ /// ```
327
+ /// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5));
328
+ /// ```
329
+ struct TORCH_API HuberLossImpl : public Cloneable<HuberLossImpl> {
330
+ explicit HuberLossImpl(HuberLossOptions options_ = {});
331
+
332
+ void reset() override;
333
+
334
+ /// Pretty prints the `HuberLoss` module into the given `stream`.
335
+ void pretty_print(std::ostream& stream) const override;
336
+
337
+ Tensor forward(const Tensor& input, const Tensor& target);
338
+
339
+ /// The options with which this `Module` was constructed.
340
+ HuberLossOptions options;
341
+ };
342
+
343
+ /// A `ModuleHolder` subclass for `HuberLossImpl`.
344
+ /// See the documentation for `HuberLossImpl` class to learn what methods it
345
+ /// provides, and examples of how to use `HuberLoss` with
346
+ /// `torch::nn::HuberLossOptions`. See the documentation for `ModuleHolder` to
347
+ /// learn about PyTorch's module storage semantics.
348
+ TORCH_MODULE(HuberLoss);
349
+
350
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiLabelMarginLoss
351
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
352
+
353
+ /// Creates a criterion that optimizes a multi-class multi-classification
354
+ /// hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch
355
+ /// `Tensor`) and output :math:`y` (which is a 2D `Tensor` of target class
356
+ /// indices). See
357
+ /// https://pytorch.org/docs/master/nn.html#torch.nn.MultiLabelMarginLoss to
358
+ /// learn about the exact behavior of this module.
359
+ ///
360
+ /// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to
361
+ /// learn what constructor arguments are supported for this module.
362
+ ///
363
+ /// Example:
364
+ /// ```
365
+ /// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone));
366
+ /// ```
367
+ struct TORCH_API MultiLabelMarginLossImpl
368
+ : public Cloneable<MultiLabelMarginLossImpl> {
369
+ explicit MultiLabelMarginLossImpl(MultiLabelMarginLossOptions options_ = {});
370
+
371
+ void reset() override;
372
+
373
+ /// Pretty prints the `MultiLabelMarginLoss` module into the given `stream`.
374
+ void pretty_print(std::ostream& stream) const override;
375
+
376
+ Tensor forward(const Tensor& input, const Tensor& target);
377
+
378
+ /// The options with which this `Module` was constructed.
379
+ MultiLabelMarginLossOptions options;
380
+ };
381
+
382
+ /// A `ModuleHolder` subclass for `MultiLabelMarginLossImpl`.
383
+ /// See the documentation for `MultiLabelMarginLossImpl` class to learn what
384
+ /// methods it provides, and examples of how to use `MultiLabelMarginLoss` with
385
+ /// `torch::nn::MultiLabelMarginLossOptions`. See the documentation for
386
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
387
+ TORCH_MODULE(MultiLabelMarginLoss);
388
+
389
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SoftMarginLoss
390
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
391
+
392
+ /// Creates a criterion that optimizes a two-class classification
393
+ /// logistic loss between input tensor :math:`x` and target tensor :math:`y`
394
+ /// (containing 1 or -1).
395
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.SoftMarginLoss to learn
396
+ /// about the exact behavior of this module.
397
+ ///
398
+ /// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn
399
+ /// what constructor arguments are supported for this module.
400
+ ///
401
+ /// Example:
402
+ /// ```
403
+ /// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone));
404
+ /// ```
405
+ struct TORCH_API SoftMarginLossImpl : public Cloneable<SoftMarginLossImpl> {
406
+ explicit SoftMarginLossImpl(SoftMarginLossOptions options_ = {});
407
+
408
+ /// Pretty prints the `SoftMarginLoss` module into the given `stream`.
409
+ void pretty_print(std::ostream& stream) const override;
410
+
411
+ void reset() override;
412
+
413
+ Tensor forward(const Tensor& input, const Tensor& target);
414
+
415
+ /// The options with which this `Module` was constructed.
416
+ SoftMarginLossOptions options;
417
+ };
418
+
419
+ /// A `ModuleHolder` subclass for `SoftMarginLossImpl`.
420
+ /// See the documentation for `SoftMarginLossImpl` class to learn what methods
421
+ /// it provides, and examples of how to use `SoftMarginLoss` with
422
+ /// `torch::nn::SoftMarginLossOptions`. See the documentation for `ModuleHolder`
423
+ /// to learn about PyTorch's module storage semantics.
424
+ TORCH_MODULE(SoftMarginLoss);
425
+
426
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MultiLabelSoftMarginLoss
427
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
428
+
429
+ /// Creates a criterion that optimizes a multi-label one-versus-all
430
+ /// loss based on max-entropy, between input :math:`x` and target :math:`y` of
431
+ /// size :math:`(N, C)`. See
432
+ /// https://pytorch.org/docs/master/nn.html#torch.nn.MultiLabelSoftMarginLoss to
433
+ /// learn about the exact behavior of this module.
434
+ ///
435
+ /// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class
436
+ /// to learn what constructor arguments are supported for this module.
437
+ ///
438
+ /// Example:
439
+ /// ```
440
+ /// MultiLabelSoftMarginLoss
441
+ /// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight));
442
+ /// ```
443
+ struct TORCH_API MultiLabelSoftMarginLossImpl
444
+ : public Cloneable<MultiLabelSoftMarginLossImpl> {
445
+ explicit MultiLabelSoftMarginLossImpl(
446
+ MultiLabelSoftMarginLossOptions options_ = {});
447
+
448
+ /// Pretty prints the `MultiLabelSoftMarginLoss` module into the given
449
+ /// `stream`.
450
+ void pretty_print(std::ostream& stream) const override;
451
+
452
+ void reset() override;
453
+
454
+ Tensor forward(const Tensor& input, const Tensor& target);
455
+
456
+ /// The options with which this `Module` was constructed.
457
+ MultiLabelSoftMarginLossOptions options;
458
+ };
459
+
460
+ /// A `ModuleHolder` subclass for `MultiLabelSoftMarginLossImpl`.
461
+ /// See the documentation for `MultiLabelSoftMarginLossImpl` class to learn what
462
+ /// methods it provides, and examples of how to use `MultiLabelSoftMarginLoss`
463
+ /// with `torch::nn::MultiLabelSoftMarginLossOptions`. See the documentation for
464
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
465
+ TORCH_MODULE(MultiLabelSoftMarginLoss);
466
+
467
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TripletMarginLoss
468
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
469
+
470
+ /// Creates a criterion that measures the triplet loss given an input
471
+ /// tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater
472
+ /// than :math:`0`. This is used for measuring a relative similarity between
473
+ /// samples. A triplet is composed of `a`, `p` and `n` (i.e., `anchor`,
474
+ /// `positive examples` and `negative examples` respectively). The
475
+ /// shapes of all input tensors should be :math:`(N, D)`.
476
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.TripletMarginLoss to
477
+ /// learn about the exact behavior of this module.
478
+ ///
479
+ /// See the documentation for `torch::nn::TripletMarginLossOptions` class to
480
+ /// learn what constructor arguments are supported for this module.
481
+ ///
482
+ /// Example:
483
+ /// ```
484
+ /// TripletMarginLoss
485
+ /// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false));
486
+ /// ```
487
+ struct TORCH_API TripletMarginLossImpl
488
+ : public Cloneable<TripletMarginLossImpl> {
489
+ explicit TripletMarginLossImpl(TripletMarginLossOptions options_ = {});
490
+
491
+ void reset() override;
492
+
493
+ /// Pretty prints the `TripletMarginLoss` module into the given `stream`.
494
+ void pretty_print(std::ostream& stream) const override;
495
+
496
+ Tensor forward(
497
+ const Tensor& anchor,
498
+ const Tensor& positive,
499
+ const Tensor& negative);
500
+
501
+ /// The options with which this `Module` was constructed.
502
+ TripletMarginLossOptions options;
503
+ };
504
+
505
+ /// A `ModuleHolder` subclass for `TripletMarginLossImpl`.
506
+ /// See the documentation for `TripletMarginLossImpl` class to learn what
507
+ /// methods it provides, and examples of how to use `TripletMarginLoss` with
508
+ /// `torch::nn::TripletMarginLossOptions`. See the documentation for
509
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
510
+ TORCH_MODULE(TripletMarginLoss);
511
+
512
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TripletMarginWithDistanceLoss
513
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
514
+
515
+ /// Creates a criterion that measures the triplet loss given input
516
+ /// tensors :math:`a`, :math:`p`, and :math:`n` (representing anchor,
517
+ /// positive, and negative examples, respectively); and a nonnegative,
518
+ /// real-valued function
519
+ /// ("distance function") used to compute the relationships between the anchor
520
+ /// and positive example ("positive distance") and the anchor and negative
521
+ /// example ("negative distance").
522
+ /// See
523
+ /// https://pytorch.org/docs/master/nn.html#torch.nn.TripletMarginWithDistanceLoss
524
+ /// to learn about the exact behavior of this module.
525
+ ///
526
+ /// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions`
527
+ /// class to learn what constructor arguments are supported for this module.
528
+ ///
529
+ /// Example:
530
+ /// ```
531
+ /// TripletMarginWithDistanceLoss
532
+ /// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false));
533
+ /// ```
534
+ struct TORCH_API TripletMarginWithDistanceLossImpl
535
+ : public Cloneable<TripletMarginWithDistanceLossImpl> {
536
+ explicit TripletMarginWithDistanceLossImpl(
537
+ TripletMarginWithDistanceLossOptions options_ = {});
538
+
539
+ void reset() override;
540
+
541
+ /// Pretty prints the `TripletMarginWithDistanceLoss` module into the given
542
+ /// `stream`.
543
+ void pretty_print(std::ostream& stream) const override;
544
+
545
+ Tensor forward(
546
+ const Tensor& anchor,
547
+ const Tensor& positive,
548
+ const Tensor& negative);
549
+
550
+ /// The options with which this `Module` was constructed.
551
+ TripletMarginWithDistanceLossOptions options;
552
+ };
553
+
554
+ /// A `ModuleHolder` subclass for `TripletMarginWithDistanceLossImpl`.
555
+ /// See the documentation for `TripletMarginWithDistanceLossImpl` class to learn
556
+ /// what methods it provides, and examples of how to use
557
+ /// `TripletMarginWithDistanceLoss` with
558
+ /// `torch::nn::TripletMarginWithDistanceLossOptions`.
559
+ /// See the documentation for `ModuleHolder` to learn about PyTorch's
560
+ /// module storage semantics.
561
+ TORCH_MODULE(TripletMarginWithDistanceLoss);
562
+
563
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CTCLoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
564
+
565
+ /// The Connectionist Temporal Classification loss.
566
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.CTCLoss to learn
567
+ /// about the exact behavior of this module.
568
+ ///
569
+ /// See the documentation for `torch::nn::CTCLossOptions` class to learn what
570
+ /// constructor arguments are supported for this module.
571
+ ///
572
+ /// Example:
573
+ /// ```
574
+ /// CTCLoss
575
+ /// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum));
576
+ /// ```
577
+ struct TORCH_API CTCLossImpl : public Cloneable<CTCLossImpl> {
578
+ explicit CTCLossImpl(CTCLossOptions options_ = {});
579
+
580
+ void reset() override;
581
+
582
+ /// Pretty prints the `CTCLoss` module into the given `stream`.
583
+ void pretty_print(std::ostream& stream) const override;
584
+
585
+ Tensor forward(
586
+ const Tensor& log_probs,
587
+ const Tensor& targets,
588
+ const Tensor& input_lengths,
589
+ const Tensor& target_lengths);
590
+
591
+ /// The options with which this `Module` was constructed.
592
+ CTCLossOptions options;
593
+ };
594
+
595
+ /// A `ModuleHolder` subclass for `CTCLossImpl`.
596
+ /// See the documentation for `CTCLossImpl` class to learn what methods it
597
+ /// provides, and examples of how to use `CTCLoss` with
598
+ /// `torch::nn::CTCLossOptions`. See the documentation for `ModuleHolder` to
599
+ /// learn about PyTorch's module storage semantics.
600
+ TORCH_MODULE(CTCLoss);
601
+
602
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PoissonNLLLoss
603
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
604
+
605
+ /// Negative log likelihood loss with Poisson distribution of target.
606
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.PoissonNLLLoss to learn
607
+ /// about the exact behavior of this module.
608
+ ///
609
+ /// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn
610
+ /// what constructor arguments are supported for this module.
611
+ ///
612
+ /// Example:
613
+ /// ```
614
+ /// PoissonNLLLoss
615
+ /// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum));
616
+ /// ```
617
+ struct TORCH_API PoissonNLLLossImpl : public Cloneable<PoissonNLLLossImpl> {
618
+ explicit PoissonNLLLossImpl(PoissonNLLLossOptions options_ = {});
619
+
620
+ void reset() override;
621
+
622
+ /// Pretty prints the `PoissonNLLLoss` module into the given `stream`.
623
+ void pretty_print(std::ostream& stream) const override;
624
+
625
+ Tensor forward(const Tensor& log_input, const Tensor& targets);
626
+
627
+ /// The options with which this `Module` was constructed.
628
+ PoissonNLLLossOptions options;
629
+ };
630
+
631
+ /// A `ModuleHolder` subclass for `PoissonNLLLossImpl`.
632
+ /// See the documentation for `PoissonNLLLossImpl` class to learn what methods
633
+ /// it provides, and examples of how to use `PoissonNLLLoss` with
634
+ /// `torch::nn::PoissonNLLLossOptions`. See the documentation for `ModuleHolder`
635
+ /// to learn about PyTorch's module storage semantics.
636
+ TORCH_MODULE(PoissonNLLLoss);
637
+
638
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MarginRankingLoss
639
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
640
+
641
+ /// Creates a criterion that measures the loss given
642
+ /// inputs :math:`x1`, :math:`x2`, two 1D mini-batch `Tensors`,
643
+ /// and a label 1D mini-batch tensor :math:`y` (containing 1 or -1).
644
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.MarginRankingLoss to
645
+ /// learn about the exact behavior of this module.
646
+ ///
647
+ /// See the documentation for `torch::nn::MarginRankingLossOptions` class to
648
+ /// learn what constructor arguments are supported for this module.
649
+ ///
650
+ /// Example:
651
+ /// ```
652
+ /// MarginRankingLoss
653
+ /// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum));
654
+ /// ```
655
+ struct TORCH_API MarginRankingLossImpl
656
+ : public Cloneable<MarginRankingLossImpl> {
657
+ explicit MarginRankingLossImpl(MarginRankingLossOptions options_ = {});
658
+
659
+ void reset() override;
660
+
661
+ /// Pretty prints the `MarginRankingLoss` module into the given `stream`.
662
+ void pretty_print(std::ostream& stream) const override;
663
+
664
+ Tensor forward(
665
+ const Tensor& input1,
666
+ const Tensor& input2,
667
+ const Tensor& targets);
668
+
669
+ /// The options with which this `Module` was constructed.
670
+ MarginRankingLossOptions options;
671
+ };
672
+
673
+ /// A `ModuleHolder` subclass for `MarginRankingLossImpl`.
674
+ /// See the documentation for `MarginRankingLossImpl` class to learn what
675
+ /// methods it provides, and examples of how to use `MarginRankingLoss` with
676
+ /// `torch::nn::MarginRankingLossOptions`. See the documentation for
677
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
678
+ TORCH_MODULE(MarginRankingLoss);
679
+
680
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NLLLoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
681
+
682
+ /// The negative log likelihood loss. It is useful to train a classification
683
+ /// problem with `C` classes.
684
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.NLLLoss to learn
685
+ /// about the exact behavior of this module.
686
+ ///
687
+ /// See the documentation for `torch::nn::NLLLossOptions` class to learn what
688
+ /// constructor arguments are supported for this module.
689
+ ///
690
+ /// Example:
691
+ /// ```
692
+ /// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean));
693
+ /// ```
694
+ struct TORCH_API NLLLossImpl : public Cloneable<NLLLossImpl> {
695
+ explicit NLLLossImpl(NLLLossOptions options_ = {});
696
+
697
+ /// Pretty prints the `NLLLoss` module into the given `stream`.
698
+ void pretty_print(std::ostream& stream) const override;
699
+
700
+ void reset() override;
701
+
702
+ Tensor forward(const Tensor& input, const Tensor& target);
703
+
704
+ /// The options with which this `Module` was constructed.
705
+ NLLLossOptions options;
706
+
707
+ /// A manual rescaling weight given to each class.
708
+ Tensor weight;
709
+ };
710
+
711
+ /// A `ModuleHolder` subclass for `NLLLossImpl`.
712
+ /// See the documentation for `NLLLossImpl` class to learn what methods it
713
+ /// provides, and examples of how to use `NLLLoss` with
714
+ /// `torch::nn::NLLLossOptions`. See the documentation for `ModuleHolder` to
715
+ /// learn about PyTorch's module storage semantics.
716
+ TORCH_MODULE(NLLLoss);
717
+
718
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CrossEntropyLoss
719
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
720
+
721
+ /// Creates a criterion that computes cross entropy loss between input and
722
+ /// target. See
723
+ /// https://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss to learn
724
+ /// about the exact behavior of this module.
725
+ ///
726
+ /// See the documentation for `torch::nn::CrossEntropyLossOptions` class to
727
+ /// learn what constructor arguments are supported for this module.
728
+ ///
729
+ /// Example:
730
+ /// ```
731
+ /// CrossEntropyLoss
732
+ /// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean));
733
+ /// ```
734
+ struct TORCH_API CrossEntropyLossImpl : public Cloneable<CrossEntropyLossImpl> {
735
+ explicit CrossEntropyLossImpl(CrossEntropyLossOptions options_ = {});
736
+
737
+ void reset() override;
738
+
739
+ /// Pretty prints the `CrossEntropyLoss` module into the given `stream`.
740
+ void pretty_print(std::ostream& stream) const override;
741
+
742
+ Tensor forward(const Tensor& input, const Tensor& target);
743
+
744
+ /// The options with which this `Module` was constructed.
745
+ CrossEntropyLossOptions options;
746
+
747
+ /// A manual rescaling weight given to each class.
748
+ Tensor weight;
749
+ };
750
+
751
+ /// A `ModuleHolder` subclass for `CrossEntropyLossImpl`.
752
+ /// See the documentation for `CrossEntropyLossImpl` class to learn what methods
753
+ /// it provides, and examples of how to use `CrossEntropyLoss` with
754
+ /// `torch::nn::CrossEntropyLossOptions`. See the documentation for
755
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
756
+ TORCH_MODULE(CrossEntropyLoss);
757
+
758
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BCEWithLogitsLoss
759
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
760
+
761
+ /// This loss combines a `Sigmoid` layer and the `BCELoss` in one single
762
+ /// class. This version is more numerically stable than using a plain `Sigmoid`
763
+ /// followed by a `BCELoss` as, by combining the operations into one layer,
764
+ /// we take advantage of the log-sum-exp trick for numerical stability.
765
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.BCEWithLogitsLoss to
766
+ /// learn about the exact behavior of this module.
767
+ ///
768
+ /// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to
769
+ /// learn what constructor arguments are supported for this module.
770
+ ///
771
+ /// Example:
772
+ /// ```
773
+ /// BCEWithLogitsLoss
774
+ /// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight));
775
+ /// ```
776
+ struct TORCH_API BCEWithLogitsLossImpl
777
+ : public Cloneable<BCEWithLogitsLossImpl> {
778
+ explicit BCEWithLogitsLossImpl(BCEWithLogitsLossOptions options_ = {});
779
+
780
+ void reset() override;
781
+
782
+ /// Pretty prints the `BCEWithLogitsLoss` module into the given `stream`.
783
+ void pretty_print(std::ostream& stream) const override;
784
+
785
+ Tensor forward(const Tensor& input, const Tensor& target);
786
+
787
+ /// The options with which this `Module` was constructed.
788
+ BCEWithLogitsLossOptions options;
789
+
790
+ /// A manual rescaling weight given to the loss of each batch element.
791
+ Tensor weight;
792
+
793
+ /// A weight of positive examples.
794
+ Tensor pos_weight;
795
+ };
796
+
797
+ /// A `ModuleHolder` subclass for `BCEWithLogitsLossImpl`.
798
+ /// See the documentation for `BCEWithLogitsLossImpl` class to learn what
799
+ /// methods it provides, and examples of how to use `BCEWithLogitsLoss` with
800
+ /// `torch::nn::BCEWithLogitsLossOptions`. See the documentation for
801
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
802
+ TORCH_MODULE(BCEWithLogitsLoss);
803
+
804
+ } // namespace nn
805
+ } // namespace torch
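A hedged sketch contrasting a regression-style and a classification-style criterion from the header above (illustrative only, not part of the uploaded header; the inputs are random):

```
#include <torch/torch.h>

int main() {
  // Element-wise regression loss: input and target share the same shape.
  torch::nn::MSELoss mse;
  auto l1 = mse(torch::randn({3, 5}), torch::randn({3, 5}));   // scalar

  // Classification loss: raw scores (N, C) against Long class indices (N).
  torch::nn::CrossEntropyLoss ce(
      torch::nn::CrossEntropyLossOptions().reduction(torch::kMean));
  auto logits = torch::randn({3, 5});
  auto target = torch::tensor({0, 4, 1}, torch::kLong);
  auto l2 = ce(logits, target);                                // scalar
}
```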
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/normalization.h ADDED
@@ -0,0 +1,198 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/normalization.h>
5
+ #include <torch/nn/modules/_functions.h>
6
+ #include <torch/nn/options/normalization.h>
7
+ #include <torch/nn/pimpl.h>
8
+ #include <torch/types.h>
9
+
10
+ #include <cstddef>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace nn {
15
+
16
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LayerNorm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
17
+
18
+ /// Applies Layer Normalization over a mini-batch of inputs as described in
19
+ /// the paper `Layer Normalization`_ .
20
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.LayerNorm to learn
21
+ /// about the exact behavior of this module.
22
+ ///
23
+ /// See the documentation for `torch::nn::LayerNormOptions` class to learn what
24
+ /// constructor arguments are supported for this module.
25
+ ///
26
+ /// Example:
27
+ /// ```
28
+ /// LayerNorm model(LayerNormOptions({2,
29
+ /// 2}).elementwise_affine(false).eps(2e-5));
30
+ /// ```
31
+ class TORCH_API LayerNormImpl : public torch::nn::Cloneable<LayerNormImpl> {
32
+ public:
33
+ LayerNormImpl(std::vector<int64_t> normalized_shape)
34
+ : LayerNormImpl(LayerNormOptions(normalized_shape)) {}
35
+ explicit LayerNormImpl(LayerNormOptions options_);
36
+
37
+ void reset() override;
38
+
39
+ void reset_parameters();
40
+
41
+ /// Pretty prints the `LayerNorm` module into the given `stream`.
42
+ void pretty_print(std::ostream& stream) const override;
43
+
44
+ /// Applies layer normalization over a mini-batch of inputs as described in
45
+ /// the paper `Layer Normalization`_ .
46
+ ///
47
+ /// The mean and standard-deviation are calculated separately over the last
48
+ /// certain number dimensions which have to be of the shape specified by
49
+ /// input `normalized_shape`.
50
+ ///
51
+ /// `Layer Normalization`: https://arxiv.org/abs/1607.06450
52
+ Tensor forward(const Tensor& input);
53
+
54
+ /// The options with which this module was constructed.
55
+ LayerNormOptions options;
56
+
57
+ /// The learned weight.
58
+ /// Initialized to ones if the `elementwise_affine` option is set to `true`
59
+ /// upon construction.
60
+ Tensor weight;
61
+
62
+ /// The learned bias.
63
+ /// Initialized to zeros `elementwise_affine` option is set to `true` upon
64
+ /// construction.
65
+ Tensor bias;
66
+ };
67
+
68
+ /// A `ModuleHolder` subclass for `LayerNormImpl`.
69
+ /// See the documentation for `LayerNormImpl` class to learn what methods it
70
+ /// provides, and examples of how to use `LayerNorm` with
71
+ /// `torch::nn::LayerNormOptions`. See the documentation for `ModuleHolder` to
72
+ /// learn about PyTorch's module storage semantics.
73
+ TORCH_MODULE(LayerNorm);
74
+
75
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LocalResponseNorm
76
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
77
+
78
+ /// Applies local response normalization over an input signal composed
79
+ /// of several input planes, where channels occupy the second dimension.
80
+ /// Applies normalization across channels.
81
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.LocalResponseNorm to
82
+ /// learn about the exact behavior of this module.
83
+ ///
84
+ /// See the documentation for `torch::nn::LocalResponseNormOptions` class to
85
+ /// learn what constructor arguments are supported for this module.
86
+ ///
87
+ /// Example:
88
+ /// ```
89
+ /// LocalResponseNorm
90
+ /// model(LocalResponseNormOptions(2).alpha(0.0002).beta(0.85).k(2.));
91
+ /// ```
92
+ class TORCH_API LocalResponseNormImpl
93
+ : public Cloneable<LocalResponseNormImpl> {
94
+ public:
95
+ LocalResponseNormImpl(int64_t size)
96
+ : LocalResponseNormImpl(LocalResponseNormOptions(size)) {}
97
+ explicit LocalResponseNormImpl(const LocalResponseNormOptions& options_);
98
+
99
+ Tensor forward(const Tensor& input);
100
+
101
+ void reset() override;
102
+
103
+ /// Pretty prints the `LocalResponseNormImpl` module into the given `stream`.
104
+ void pretty_print(std::ostream& stream) const override;
105
+
106
+ /// The options with which this `Module` was constructed.
107
+ LocalResponseNormOptions options;
108
+ };
109
+
110
+ /// A `ModuleHolder` subclass for `LocalResponseNormImpl`.
111
+ /// See the documentation for `LocalResponseNormImpl` class to learn what
112
+ /// methods it provides, and examples of how to use `LocalResponseNorm` with
113
+ /// `torch::nn::LocalResponseNormOptions`. See the documentation for
114
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
115
+ TORCH_MODULE(LocalResponseNorm);
116
+
117
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CrossMapLRN2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
118
+
119
+ /// See the documentation for `torch::nn::CrossMapLRN2dOptions` class to learn
120
+ /// what constructor arguments are supported for this module.
121
+ ///
122
+ /// Example:
123
+ /// ```
124
+ /// CrossMapLRN2d model(CrossMapLRN2dOptions(3).alpha(1e-5).beta(0.1).k(10));
125
+ /// ```
126
+ class TORCH_API CrossMapLRN2dImpl
127
+ : public torch::nn::Cloneable<CrossMapLRN2dImpl> {
128
+ public:
129
+ CrossMapLRN2dImpl(int64_t size)
130
+ : CrossMapLRN2dImpl(CrossMapLRN2dOptions(size)) {}
131
+ explicit CrossMapLRN2dImpl(const CrossMapLRN2dOptions& options_)
132
+ : options(options_) {}
133
+
134
+ void reset() override;
135
+
136
+ /// Pretty prints the `CrossMapLRN2d` module into the given `stream`.
137
+ void pretty_print(std::ostream& stream) const override;
138
+
139
+ torch::Tensor forward(const torch::Tensor& input);
140
+
141
+ CrossMapLRN2dOptions options;
142
+ };
143
+
144
+ /// A `ModuleHolder` subclass for `CrossMapLRN2dImpl`.
145
+ /// See the documentation for `CrossMapLRN2dImpl` class to learn what methods it
146
+ /// provides, and examples of how to use `CrossMapLRN2d` with
147
+ /// `torch::nn::CrossMapLRN2dOptions`. See the documentation for `ModuleHolder`
148
+ /// to learn about PyTorch's module storage semantics.
149
+ TORCH_MODULE(CrossMapLRN2d);
150
+
151
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GroupNorm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
152
+
153
+ /// Applies Group Normalization over a mini-batch of inputs as described in
154
+ /// the paper `Group Normalization`_ .
155
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.GroupNorm to learn
156
+ /// about the exact behavior of this module.
157
+ ///
158
+ /// See the documentation for `torch::nn::GroupNormOptions` class to learn what
159
+ /// constructor arguments are supported for this module.
160
+ ///
161
+ /// Example:
162
+ /// ```
163
+ /// GroupNorm model(GroupNormOptions(2, 2).eps(2e-5).affine(false));
164
+ /// ```
165
+ class TORCH_API GroupNormImpl : public torch::nn::Cloneable<GroupNormImpl> {
166
+ public:
167
+ GroupNormImpl(int64_t num_groups, int64_t num_channels)
168
+ : GroupNormImpl(GroupNormOptions(num_groups, num_channels)) {}
169
+ explicit GroupNormImpl(const GroupNormOptions& options_);
170
+
171
+ void reset() override;
172
+
173
+ void reset_parameters();
174
+
175
+ /// Pretty prints the `GroupNorm` module into the given `stream`.
176
+ void pretty_print(std::ostream& stream) const override;
177
+
178
+ Tensor forward(const Tensor& input);
179
+
180
+ /// The options with which this module was constructed.
181
+ GroupNormOptions options;
182
+
183
+ /// The learned weight.
184
+ Tensor weight;
185
+
186
+ /// The learned bias.
187
+ Tensor bias;
188
+ };
189
+
190
+ /// A `ModuleHolder` subclass for `GroupNormImpl`.
191
+ /// See the documentation for `GroupNormImpl` class to learn what methods it
192
+ /// provides, and examples of how to use `GroupNorm` with
193
+ /// `torch::nn::GroupNormOptions`. See the documentation for `ModuleHolder` to
194
+ /// learn about PyTorch's module storage semantics.
195
+ TORCH_MODULE(GroupNorm);
196
+
197
+ } // namespace nn
198
+ } // namespace torch
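
A short sketch of how the `LayerNorm` and `GroupNorm` modules declared in this header might be used, assuming libtorch is linked; the shapes and option values below are illustrative only:

```cpp
#include <torch/torch.h>

int main() {
  // LayerNorm over the last two dimensions of a (N, 3, 4) input.
  torch::nn::LayerNorm layer_norm(
      torch::nn::LayerNormOptions({3, 4}).eps(1e-5).elementwise_affine(true));
  torch::Tensor x = torch::randn({8, 3, 4});
  torch::Tensor y = layer_norm(x);  // same shape as x

  // GroupNorm: 6 channels split into 3 groups for a (N, C, H, W) input.
  torch::nn::GroupNorm group_norm(torch::nn::GroupNormOptions(3, 6));
  torch::Tensor img = torch::randn({2, 6, 5, 5});
  torch::Tensor out = group_norm(img);  // same shape as img
}
```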
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/padding.h ADDED
@@ -0,0 +1,378 @@
1
+ #pragma once
2
+
3
+ #include <torch/expanding_array.h>
4
+ #include <torch/nn/cloneable.h>
5
+ #include <torch/nn/functional/padding.h>
6
+
7
+ #include <torch/csrc/Export.h>
8
+
9
+ namespace torch {
10
+ namespace nn {
11
+
12
+ /// Base class for all (dimension-specialized) ReflectionPad modules.
13
+ template <size_t D, typename Derived>
14
+ class TORCH_API ReflectionPadImpl : public torch::nn::Cloneable<Derived> {
15
+ public:
16
+ ReflectionPadImpl(ExpandingArray<D * 2> padding)
17
+ : ReflectionPadImpl(ReflectionPadOptions<D>(padding)) {}
18
+ explicit ReflectionPadImpl(const ReflectionPadOptions<D>& options_);
19
+
20
+ void reset() override;
21
+
22
+ Tensor forward(const Tensor& input);
23
+
24
+ /// Pretty prints the `ReflectionPad{1,2,3}d` module into the given `stream`.
25
+ void pretty_print(std::ostream& stream) const override;
26
+
27
+ /// The options with which this `Module` was constructed.
28
+ ReflectionPadOptions<D> options;
29
+ };
30
+
31
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReflectionPad1d
32
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
33
+
34
+ /// Applies ReflectionPad over a 1-D input.
35
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReflectionPad1d to
36
+ /// learn about the exact behavior of this module.
37
+ ///
38
+ /// See the documentation for `torch::nn::ReflectionPad1dOptions` class to learn
39
+ /// what constructor arguments are supported for this module.
40
+ ///
41
+ /// Example:
42
+ /// ```
43
+ /// ReflectionPad1d model(ReflectionPad1dOptions({3, 1}));
44
+ /// ```
45
+ class TORCH_API ReflectionPad1dImpl
46
+ : public ReflectionPadImpl<1, ReflectionPad1dImpl> {
47
+ public:
48
+ using ReflectionPadImpl<1, ReflectionPad1dImpl>::ReflectionPadImpl;
49
+ };
50
+
51
+ /// A `ModuleHolder` subclass for `ReflectionPad1dImpl`.
52
+ /// See the documentation for `ReflectionPad1dImpl` class to learn what methods
53
+ /// it provides, and examples of how to use `ReflectionPad1d` with
54
+ /// `torch::nn::ReflectionPad1dOptions`. See the documentation for
55
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
56
+ TORCH_MODULE(ReflectionPad1d);
57
+
58
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReflectionPad2d
59
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
60
+
61
+ /// Applies ReflectionPad over a 2-D input.
62
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReflectionPad2d to
63
+ /// learn about the exact behavior of this module.
64
+ ///
65
+ /// See the documentation for `torch::nn::ReflectionPad2dOptions` class to learn
66
+ /// what constructor arguments are supported for this module.
67
+ ///
68
+ /// Example:
69
+ /// ```
70
+ /// ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0}));
71
+ /// ```
72
+ class TORCH_API ReflectionPad2dImpl
73
+ : public ReflectionPadImpl<2, ReflectionPad2dImpl> {
74
+ public:
75
+ using ReflectionPadImpl<2, ReflectionPad2dImpl>::ReflectionPadImpl;
76
+ };
77
+
78
+ /// A `ModuleHolder` subclass for `ReflectionPad2dImpl`.
79
+ /// See the documentation for `ReflectionPad2dImpl` class to learn what methods
80
+ /// it provides, and examples of how to use `ReflectionPad2d` with
81
+ /// `torch::nn::ReflectionPad2dOptions`. See the documentation for
82
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
83
+ TORCH_MODULE(ReflectionPad2d);
84
+
85
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReflectionPad3d
86
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
87
+
88
+ /// Applies ReflectionPad over a 3-D input.
89
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReflectionPad3d to
90
+ /// learn about the exact behavior of this module.
91
+ ///
92
+ /// See the documentation for `torch::nn::ReflectionPad3dOptions` class to learn
93
+ /// what constructor arguments are supported for this module.
94
+ ///
95
+ /// Example:
96
+ /// ```
97
+ /// ReflectionPad3d model(ReflectionPad3dOptions(1));
98
+ /// ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 2}));
99
+ /// ```
100
+ class TORCH_API ReflectionPad3dImpl
101
+ : public ReflectionPadImpl<3, ReflectionPad3dImpl> {
102
+ public:
103
+ using ReflectionPadImpl<3, ReflectionPad3dImpl>::ReflectionPadImpl;
104
+ };
105
+
106
+ /// A `ModuleHolder` subclass for `ReflectionPad3dImpl`.
107
+ /// See the documentation for `ReflectionPad3dImpl` class to learn what methods
108
+ /// it provides, and examples of how to use `ReflectionPad3d` with
109
+ /// `torch::nn::ReflectionPad3dOptions`. See the documentation for
110
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
111
+ TORCH_MODULE(ReflectionPad3d);
112
+
113
+ // ============================================================================
114
+
115
+ /// Base class for all (dimension-specialized) ReplicationPad modules.
116
+ template <size_t D, typename Derived>
117
+ class TORCH_API ReplicationPadImpl : public torch::nn::Cloneable<Derived> {
118
+ public:
119
+ ReplicationPadImpl(ExpandingArray<D * 2> padding)
120
+ : ReplicationPadImpl(ReplicationPadOptions<D>(padding)) {}
121
+ explicit ReplicationPadImpl(const ReplicationPadOptions<D>& options_);
122
+
123
+ void reset() override;
124
+
125
+ Tensor forward(const Tensor& input);
126
+
127
+ /// Pretty prints the `ReplicationPad{1,2,3}d` module into the given `stream`.
128
+ void pretty_print(std::ostream& stream) const override;
129
+
130
+ /// The options with which this `Module` was constructed.
131
+ ReplicationPadOptions<D> options;
132
+ };
133
+
134
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReplicationPad1d
135
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
136
+
137
+ /// Applies ReplicationPad over a 1-D input.
138
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReplicationPad1d to
139
+ /// learn about the exact behavior of this module.
140
+ ///
141
+ /// See the documentation for `torch::nn::ReplicationPad1dOptions` class to
142
+ /// learn what constructor arguments are supported for this module.
143
+ ///
144
+ /// Example:
145
+ /// ```
146
+ /// ReplicationPad1d model(ReplicationPad1dOptions({3, 1}));
147
+ /// ```
148
+ class TORCH_API ReplicationPad1dImpl
149
+ : public ReplicationPadImpl<1, ReplicationPad1dImpl> {
150
+ public:
151
+ using ReplicationPadImpl<1, ReplicationPad1dImpl>::ReplicationPadImpl;
152
+ };
153
+
154
+ /// A `ModuleHolder` subclass for `ReplicationPad1dImpl`.
155
+ /// See the documentation for `ReplicationPad1dImpl` class to learn what methods
156
+ /// it provides, and examples of how to use `ReplicationPad1d` with
157
+ /// `torch::nn::ReplicationPad1dOptions`. See the documentation for
158
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
159
+ TORCH_MODULE(ReplicationPad1d);
160
+
161
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReplicationPad2d
162
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
163
+
164
+ /// Applies ReplicationPad over a 2-D input.
165
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReplicationPad2d to
166
+ /// learn about the exact behavior of this module.
167
+ ///
168
+ /// See the documentation for `torch::nn::ReplicationPad2dOptions` class to
169
+ /// learn what constructor arguments are supported for this module.
170
+ ///
171
+ /// Example:
172
+ /// ```
173
+ /// ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0}));
174
+ /// ```
175
+ class TORCH_API ReplicationPad2dImpl
176
+ : public ReplicationPadImpl<2, ReplicationPad2dImpl> {
177
+ public:
178
+ using ReplicationPadImpl<2, ReplicationPad2dImpl>::ReplicationPadImpl;
179
+ };
180
+
181
+ /// A `ModuleHolder` subclass for `ReplicationPad2dImpl`.
182
+ /// See the documentation for `ReplicationPad2dImpl` class to learn what methods
183
+ /// it provides, and examples of how to use `ReplicationPad2d` with
184
+ /// `torch::nn::ReplicationPad2dOptions`. See the documentation for
185
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
186
+ TORCH_MODULE(ReplicationPad2d);
187
+
188
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ReplicationPad3d
189
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
190
+
191
+ /// Applies ReplicationPad over a 3-D input.
192
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ReplicationPad3d to
193
+ /// learn about the exact behavior of this module.
194
+ ///
195
+ /// See the documentation for `torch::nn::ReplicationPad3dOptions` class to
196
+ /// learn what constructor arguments are supported for this module.
197
+ ///
198
+ /// Example:
199
+ /// ```
200
+ /// ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
201
+ /// ```
202
+ class TORCH_API ReplicationPad3dImpl
203
+ : public ReplicationPadImpl<3, ReplicationPad3dImpl> {
204
+ public:
205
+ using ReplicationPadImpl<3, ReplicationPad3dImpl>::ReplicationPadImpl;
206
+ };
207
+
208
+ /// A `ModuleHolder` subclass for `ReplicationPad3dImpl`.
209
+ /// See the documentation for `ReplicationPad3dImpl` class to learn what methods
210
+ /// it provides, and examples of how to use `ReplicationPad3d` with
211
+ /// `torch::nn::ReplicationPad3dOptions`. See the documentation for
212
+ /// `ModuleHolder` to learn about PyTorch's module storage semantics.
213
+ TORCH_MODULE(ReplicationPad3d);
214
+
215
+ // ============================================================================
216
+
217
+ /// Base class for all (dimension-specialized) ZeroPad modules.
218
+ template <size_t D, typename Derived>
219
+ class TORCH_API ZeroPadImpl : public torch::nn::Cloneable<Derived> {
220
+ public:
221
+ ZeroPadImpl(ExpandingArray<D * 2> padding)
222
+ : ZeroPadImpl(ZeroPadOptions<D>(padding)) {}
223
+ explicit ZeroPadImpl(const ZeroPadOptions<D>& options_);
224
+
225
+ void reset() override;
226
+
227
+ Tensor forward(const Tensor& input);
228
+
229
+ /// Pretty prints the `ZeroPad{1,2,3}d` module into the given `stream`.
230
+ void pretty_print(std::ostream& stream) const override;
231
+
232
+ /// The options with which this `Module` was constructed.
233
+ ZeroPadOptions<D> options;
234
+ };
235
+
236
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
237
+ /// Applies ZeroPad over a 1-D input.
238
+ class TORCH_API ZeroPad1dImpl : public ZeroPadImpl<1, ZeroPad1dImpl> {
239
+ public:
240
+ using ZeroPadImpl<1, ZeroPad1dImpl>::ZeroPadImpl;
241
+ };
242
+
243
+ /// A `ModuleHolder` subclass for `ZeroPad1dImpl`.
244
+ /// See the documentation for `ZeroPad1dImpl` class to learn what methods it
245
+ /// provides, and examples of how to use `ZeroPad1d` with
246
+ /// `torch::nn::ZeroPad1dOptions`. See the documentation for `ModuleHolder` to
247
+ /// learn about PyTorch's module storage semantics.
248
+ TORCH_MODULE(ZeroPad1d);
249
+
250
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
251
+ /// Applies ZeroPad over a 2-D input.
252
+ class TORCH_API ZeroPad2dImpl : public ZeroPadImpl<2, ZeroPad2dImpl> {
253
+ public:
254
+ using ZeroPadImpl<2, ZeroPad2dImpl>::ZeroPadImpl;
255
+ };
256
+
257
+ /// A `ModuleHolder` subclass for `ZeroPad2dImpl`.
258
+ /// See the documentation for `ZeroPad2dImpl` class to learn what methods it
259
+ /// provides, and examples of how to use `ZeroPad2d` with
260
+ /// `torch::nn::ZeroPad2dOptions`. See the documentation for `ModuleHolder` to
261
+ /// learn about PyTorch's module storage semantics.
262
+ TORCH_MODULE(ZeroPad2d);
263
+
264
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
265
+ /// Applies ZeroPad over a 3-D input.
266
+ class TORCH_API ZeroPad3dImpl : public ZeroPadImpl<3, ZeroPad3dImpl> {
267
+ public:
268
+ using ZeroPadImpl<3, ZeroPad3dImpl>::ZeroPadImpl;
269
+ };
270
+
271
+ /// A `ModuleHolder` subclass for `ZeroPad3dImpl`.
272
+ /// See the documentation for `ZeroPad3dImpl` class to learn what methods it
273
+ /// provides, and examples of how to use `ZeroPad3d` with
274
+ /// `torch::nn::ZeroPad3dOptions`. See the documentation for `ModuleHolder` to
275
+ /// learn about PyTorch's module storage semantics.
276
+ TORCH_MODULE(ZeroPad3d);
277
+
278
+ // ============================================================================
279
+
280
+ /// Base class for all (dimension-specialized) ConstantPad modules.
281
+ template <size_t D, typename Derived>
282
+ class TORCH_API ConstantPadImpl : public torch::nn::Cloneable<Derived> {
283
+ public:
284
+ ConstantPadImpl(ExpandingArray<D * 2> padding, double value)
285
+ : ConstantPadImpl(ConstantPadOptions<D>(padding, value)) {}
286
+ explicit ConstantPadImpl(const ConstantPadOptions<D>& options_);
287
+
288
+ void reset() override;
289
+
290
+ Tensor forward(const Tensor& input);
291
+
292
+ /// Pretty prints the `ConstantPad{1,2,3}d` module into the given `stream`.
293
+ void pretty_print(std::ostream& stream) const override;
294
+
295
+ /// The options with which this `Module` was constructed.
296
+ ConstantPadOptions<D> options;
297
+ };
298
+
299
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
300
+
301
+ /// Applies ConstantPad over a 1-D input.
302
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ConstantPad1d to learn
303
+ /// about the exact behavior of this module.
304
+ ///
305
+ /// See the documentation for `torch::nn::ConstantPad1dOptions` class to learn
306
+ /// what constructor arguments are supported for this module.
307
+ ///
308
+ /// Example:
309
+ /// ```
310
+ /// ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5));
311
+ /// ```
312
+ class TORCH_API ConstantPad1dImpl
313
+ : public ConstantPadImpl<1, ConstantPad1dImpl> {
314
+ public:
315
+ using ConstantPadImpl<1, ConstantPad1dImpl>::ConstantPadImpl;
316
+ };
317
+
318
+ /// A `ModuleHolder` subclass for `ConstantPad1dImpl`.
319
+ /// See the documentation for `ConstantPad1dImpl` class to learn what methods it
320
+ /// provides, and examples of how to use `ConstantPad1d` with
321
+ /// `torch::nn::ConstantPad1dOptions`. See the documentation for `ModuleHolder`
322
+ /// to learn about PyTorch's module storage semantics.
323
+ TORCH_MODULE(ConstantPad1d);
324
+
325
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
326
+
327
+ /// Applies ConstantPad over a 2-D input.
328
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ConstantPad2d to learn
329
+ /// about the exact behavior of this module.
330
+ ///
331
+ /// See the documentation for `torch::nn::ConstantPad2dOptions` class to learn
332
+ /// what constructor arguments are supported for this module.
333
+ ///
334
+ /// Example:
335
+ /// ```
336
+ /// ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5));
337
+ /// ```
338
+ class TORCH_API ConstantPad2dImpl
339
+ : public ConstantPadImpl<2, ConstantPad2dImpl> {
340
+ public:
341
+ using ConstantPadImpl<2, ConstantPad2dImpl>::ConstantPadImpl;
342
+ };
343
+
344
+ /// A `ModuleHolder` subclass for `ConstantPad2dImpl`.
345
+ /// See the documentation for `ConstantPad2dImpl` class to learn what methods it
346
+ /// provides, and examples of how to use `ConstantPad2d` with
347
+ /// `torch::nn::ConstantPad2dOptions`. See the documentation for `ModuleHolder`
348
+ /// to learn about PyTorch's module storage semantics.
349
+ TORCH_MODULE(ConstantPad2d);
350
+
351
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
352
+
353
+ /// Applies ConstantPad over a 3-D input.
354
+ /// See https://pytorch.org/docs/master/nn.html#torch.nn.ConstantPad3d to learn
355
+ /// about the exact behavior of this module.
356
+ ///
357
+ /// See the documentation for `torch::nn::ConstantPad3dOptions` class to learn
358
+ /// what constructor arguments are supported for this module.
359
+ ///
360
+ /// Example:
361
+ /// ```
362
+ /// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5));
363
+ /// ```
364
+ class TORCH_API ConstantPad3dImpl
365
+ : public ConstantPadImpl<3, ConstantPad3dImpl> {
366
+ public:
367
+ using ConstantPadImpl<3, ConstantPad3dImpl>::ConstantPadImpl;
368
+ };
369
+
370
+ /// A `ModuleHolder` subclass for `ConstantPad3dImpl`.
371
+ /// See the documentation for `ConstantPad3dImpl` class to learn what methods it
372
+ /// provides, and examples of how to use `ConstantPad3d` with
373
+ /// `torch::nn::ConstantPad3dOptions`. See the documentation for `ModuleHolder`
374
+ /// to learn about PyTorch's module storage semantics.
375
+ TORCH_MODULE(ConstantPad3d);
376
+
377
+ } // namespace nn
378
+ } // namespace torch
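
The padding modules above all follow the same construct-with-options-then-call pattern; a minimal sketch (illustrative shapes and padding values, not taken from the commit):

```cpp
#include <torch/torch.h>

int main() {
  // ReflectionPad2d: padding is given as {left, right, top, bottom}.
  torch::nn::ReflectionPad2d reflect(
      torch::nn::ReflectionPad2dOptions({1, 1, 2, 0}));
  torch::Tensor img = torch::arange(9, torch::kFloat).reshape({1, 1, 3, 3});
  torch::Tensor padded = reflect(img);  // shape becomes {1, 1, 5, 5}

  // ConstantPad1d: pad {left, right} of the last dimension with a fixed value.
  torch::nn::ConstantPad1d pad(torch::nn::ConstantPad1dOptions({3, 1}, 3.5));
  torch::Tensor seq = torch::randn({2, 4, 6});
  torch::Tensor padded_seq = pad(seq);  // shape becomes {2, 4, 10}
}
```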
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/pixelshuffle.h ADDED
@@ -0,0 +1,88 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional/pixelshuffle.h>
5
+ #include <torch/nn/options/pixelshuffle.h>
6
+
7
+ #include <torch/csrc/Export.h>
8
+
9
+ namespace torch {
10
+ namespace nn {
11
+
12
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PixelShuffle
13
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14
+
15
+ /// Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
16
+ /// to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an
17
+ /// upscale factor. See
18
+ /// https://pytorch.org/docs/master/nn.html#torch.nn.PixelShuffle to learn about
19
+ /// the exact behavior of this module.
20
+ ///
21
+ /// See the documentation for `torch::nn::PixelShuffleOptions` class to learn
22
+ /// what constructor arguments are supported for this module.
23
+ ///
24
+ /// Example:
25
+ /// ```
26
+ /// PixelShuffle model(PixelShuffleOptions(5));
27
+ /// ```
28
+ struct TORCH_API PixelShuffleImpl
29
+ : public torch::nn::Cloneable<PixelShuffleImpl> {
30
+ explicit PixelShuffleImpl(const PixelShuffleOptions& options_);
31
+
32
+ /// Pretty prints the `PixelShuffle` module into the given `stream`.
33
+ void pretty_print(std::ostream& stream) const override;
34
+
35
+ Tensor forward(const Tensor& input);
36
+
37
+ void reset() override;
38
+
39
+ /// The options with which this `Module` was constructed.
40
+ PixelShuffleOptions options;
41
+ };
42
+
43
+ /// A `ModuleHolder` subclass for `PixelShuffleImpl`.
44
+ /// See the documentation for `PixelShuffleImpl` class to learn what methods it
45
+ /// provides, and examples of how to use `PixelShuffle` with
46
+ /// `torch::nn::PixelShuffleOptions`. See the documentation for `ModuleHolder`
47
+ /// to learn about PyTorch's module storage semantics.
48
+ TORCH_MODULE(PixelShuffle);
49
+
50
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PixelUnshuffle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
51
+
52
+ /// Reverses the PixelShuffle operation by rearranging elements in a tensor of
53
+ /// shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape :math:`(*,
54
+ /// C \times r^2, H, W)`, where r is a downscale factor. See
55
+ /// https://pytorch.org/docs/master/nn.html#torch.nn.PixelUnshuffle to learn
56
+ /// about the exact behavior of this module.
57
+ ///
58
+ /// See the documentation for `torch::nn::PixelUnshuffleOptions` class to learn
59
+ /// what constructor arguments are supported for this module.
60
+ ///
61
+ /// Example:
62
+ /// ```
63
+ /// PixelUnshuffle model(PixelUnshuffleOptions(5));
64
+ /// ```
65
+ struct TORCH_API PixelUnshuffleImpl
66
+ : public torch::nn::Cloneable<PixelUnshuffleImpl> {
67
+ explicit PixelUnshuffleImpl(const PixelUnshuffleOptions& options_);
68
+
69
+ /// Pretty prints the `PixelUnshuffle` module into the given `stream`.
70
+ void pretty_print(std::ostream& stream) const override;
71
+
72
+ Tensor forward(const Tensor& input);
73
+
74
+ void reset() override;
75
+
76
+ /// The options with which this `Module` was constructed.
77
+ PixelUnshuffleOptions options;
78
+ };
79
+
80
+ /// A `ModuleHolder` subclass for `PixelUnshuffleImpl`.
81
+ /// See the documentation for `PixelUnshuffleImpl` class to learn what methods
82
+ /// it provides, and examples of how to use `PixelUnshuffle` with
83
+ /// `torch::nn::PixelUnshuffleOptions`. See the documentation for `ModuleHolder`
84
+ /// to learn about PyTorch's module storage semantics.
85
+ TORCH_MODULE(PixelUnshuffle);
86
+
87
+ } // namespace nn
88
+ } // namespace torch
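
A brief sketch of the round trip between `PixelShuffle` and `PixelUnshuffle` declared above, with an assumed upscale factor of 2 and illustrative shapes:

```cpp
#include <torch/torch.h>

int main() {
  // PixelShuffle with upscale_factor r = 2:
  // (N, C * r^2, H, W) -> (N, C, H * r, W * r).
  torch::nn::PixelShuffle shuffle(torch::nn::PixelShuffleOptions(2));
  torch::Tensor x = torch::randn({1, 8, 4, 4});
  torch::Tensor up = shuffle(x);  // shape {1, 2, 8, 8}

  // PixelUnshuffle reverses the rearrangement with the same factor.
  torch::nn::PixelUnshuffle unshuffle(torch::nn::PixelUnshuffleOptions(2));
  torch::Tensor down = unshuffle(up);  // back to {1, 8, 4, 4}
}
```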