applied-ai-018 committed
Commit 28fb145 · verified · 1 Parent(s): 3202bc1

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step20/zero/10.attention.dense.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg.pt +3 -0
  7. ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg_sq.pt +3 -0
  8. ckpts/universal/global_step20/zero/6.input_layernorm.weight/fp32.pt +3 -0
  9. ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  10. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h +133 -0
  11. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h +148 -0
  12. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h +169 -0
  13. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/sequential.h +390 -0
  14. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h +714 -0
  15. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h +41 -0
  16. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h +95 -0
  17. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/conv.h +415 -0
  18. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h +71 -0
  19. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h +130 -0
  20. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h +242 -0
  21. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h +99 -0
  22. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h +89 -0
  23. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h +95 -0
  24. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h +802 -0
  25. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h +192 -0
  26. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h +219 -0
  27. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h +65 -0
  28. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h +596 -0
  29. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h +236 -0
  30. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h +64 -0
  31. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformercoder.h +76 -0
  32. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformerlayer.h +72 -0
  33. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h +110 -0
  34. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/vision.h +36 -0
  35. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cache_entry.h +69 -0
  36. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/compiled_autograd.h +713 -0
  37. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpp_shim.h +15 -0
  38. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h +21 -0
  39. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/debug_macros.h +46 -0
  40. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h +6 -0
  41. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/extra_state.h +146 -0
  42. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h +4 -0
  43. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h +13 -0
  44. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h +7 -0
  45. venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/utils.h +9 -0
  46. venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/byte_order.h +227 -0
  47. venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h +8 -0
  48. venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h +15 -0
  49. venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/device_lazy_init.h +48 -0
  50. venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/disable_torch_function.h +42 -0
ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbdf1fb9a5b8ac37ddfed837889c081b64a5db654e8a9ef78f3b22987d43e9c8
+ size 16778411
ckpts/universal/global_step20/zero/10.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e13e8b70a66c4cab27f47edf14ac5f91abd7cc2d83249f10dcce09eaeb30cdb4
+ size 16778317
ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:917ee73a5297f3461284b91cf58a301a61fd8a42ba6ac69c2e1cf2774274e5d8
+ size 50332749
ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3415c95ef414a2ccd8aa1150c87eb768868935f8a629cb7c06d6e8cb30700ac7
+ size 16778396
ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd12c3657bc7194a6b9c0c827263e4c4dc56ad9f9044cd78b974d8aca8acdb42
+ size 16778411
ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:342b79bfd9865a5a64b86187205d4fb334acbfe992387aab3da642fd1afce0da
+ size 9372
ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aad901732dc8516cc5e419426f1c816e354ad6230a64ac8307015d0e7c0226f5
+ size 9387
ckpts/universal/global_step20/zero/6.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:163058f261d0ef55e08bf5ceb82343469521053139a2642245f2571a149bca76
+ size 9293
ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0d72fd4978dfff4812935b6d22239bb4591b2b17d96727f0512dc988edbc3f7
+ size 33555627
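Each of the nine checkpoint diffs above is a Git LFS pointer file rather than the tensor payload itself: the three added lines record the LFS spec version, the SHA-256 object id, and the byte size of the real object. As a minimal illustrative sketch (not part of this commit; the struct and function names below are made up), such a pointer can be parsed with a few lines of C++:

```cpp
// Sketch: parse a Git LFS pointer file of the form shown above
// (version / oid sha256:<hex> / size <bytes>). Illustrative only.
#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>

struct LfsPointer {            // hypothetical helper type, not from the repo
  std::string version;
  std::string oid_sha256;
  std::uint64_t size = 0;
};

LfsPointer parse_lfs_pointer(const std::string& path) {
  LfsPointer p;
  std::ifstream in(path);
  std::string key, value;
  while (in >> key >> value) {
    if (key == "version") p.version = value;
    else if (key == "oid")  p.oid_sha256 = value.substr(value.find(':') + 1);
    else if (key == "size") p.size = std::stoull(value);
  }
  return p;
}

int main() {
  auto p = parse_lfs_pointer(
      "ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg_sq.pt");
  std::cout << p.oid_sha256 << " " << p.size << "\n";
}
```

Note this only reads the pointer; the actual optimizer-state tensors live in LFS storage and are only present locally after `git lfs pull`.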
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h ADDED
@@ -0,0 +1,133 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/modules/container/any_value.h>
4
+
5
+ namespace torch {
6
+ namespace nn {
7
+
8
+ class Module;
9
+
10
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModulePlaceholder ~~~~~~~~~~~~~~~~~~~~~~~~~~
11
+
12
+ /// The static type of the object we store in the `AnyModule`, which erases
13
+ /// the actual type, but allows us to call `forward()` on the underlying
14
+ /// module.
15
+ struct AnyModulePlaceholder : public AnyValue::Placeholder {
16
+ using AnyValue::Placeholder::Placeholder;
17
+
18
+ /// The "erased" `forward()` method.
19
+ virtual AnyValue forward(std::vector<AnyValue>&& arguments) = 0;
20
+
21
+ /// Returns std::shared_ptr<Module> pointing to the erased module.
22
+ virtual std::shared_ptr<Module> ptr() = 0;
23
+
24
+ /// Returns a `AnyModulePlaceholder` with a shallow copy of this `AnyModule`.
25
+ virtual std::unique_ptr<AnyModulePlaceholder> copy() const = 0;
26
+
27
+ /// Returns a `AnyModulePlaceholder` with a deep copy of this `AnyModule`.
28
+ virtual std::unique_ptr<AnyModulePlaceholder> clone_module(
29
+ optional<Device> device) const = 0;
30
+ };
31
+
32
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModuleHolder ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
33
+
34
+ /// The dynamic type of the object stored in the `AnyModule`. It contains the
35
+ /// concrete instance to which all calls are forwarded. It is parameterized
36
+ /// over the concrete type of the module, and the types of the arguments the
37
+ /// module takes in its `forward()` method.
38
+ template <typename ModuleType, typename... ArgumentTypes>
39
+ struct AnyModuleHolder : public AnyModulePlaceholder {
40
+ /// \internal
41
+ struct CheckedGetter {
42
+ template <typename T>
43
+ decay_t<T>&& operator()(size_t index) {
44
+ AT_ASSERT(index < arguments_.size());
45
+ auto& value = arguments_[index];
46
+ if (auto* maybe_value = value.template try_get<decay_t<T>>()) {
47
+ return std::move(*maybe_value);
48
+ }
49
+ AT_ERROR(
50
+ "Expected argument #",
51
+ index,
52
+ " to be of type ",
53
+ c10::demangle(typeid(T).name()),
54
+ ", but received value of type ",
55
+ c10::demangle(value.type_info().name()));
56
+ }
57
+ std::vector<AnyValue>& arguments_;
58
+ };
59
+
60
+ /// \internal
61
+ struct InvokeForward {
62
+ template <typename... Ts>
63
+ AnyValue operator()(Ts&&... ts) {
64
+ return AnyValue(module_->forward(std::forward<Ts>(ts)...));
65
+ }
66
+ std::shared_ptr<ModuleType>& module_;
67
+ };
68
+
69
+ /// Constructs the `AnyModuleHolder` from a concrete module.
70
+ explicit AnyModuleHolder(std::shared_ptr<ModuleType>&& module_)
71
+ : AnyModulePlaceholder(typeid(ModuleType)), module(std::move(module_)) {}
72
+
73
+ /// Calls `forward()` on the underlying module, casting each `AnyValue` in the
74
+ /// argument vector to a concrete value.
75
+ AnyValue forward(std::vector<AnyValue>&& arguments) override {
76
+ if (module->_forward_has_default_args()) {
77
+ TORCH_CHECK(
78
+ arguments.size() >= module->_forward_num_required_args() &&
79
+ arguments.size() <= sizeof...(ArgumentTypes),
80
+ c10::demangle(type_info.name()),
81
+ "'s forward() method expects at least ",
82
+ module->_forward_num_required_args(),
83
+ " argument(s) and at most ",
84
+ sizeof...(ArgumentTypes),
85
+ " argument(s), but received ",
86
+ arguments.size(),
87
+ ".");
88
+ arguments = std::move(
89
+ module->_forward_populate_default_args(std::move(arguments)));
90
+ } else {
91
+ std::string use_default_args_macro_prompt = " If " +
92
+ c10::demangle(type_info.name()) +
93
+ "'s forward() method has default arguments, " +
94
+ "please make sure the forward() method is declared with a corresponding `FORWARD_HAS_DEFAULT_ARGS` macro.";
95
+ TORCH_CHECK(
96
+ arguments.size() == sizeof...(ArgumentTypes),
97
+ c10::demangle(type_info.name()),
98
+ "'s forward() method expects ",
99
+ sizeof...(ArgumentTypes),
100
+ " argument(s), but received ",
101
+ arguments.size(),
102
+ ".",
103
+ (arguments.size() < sizeof...(ArgumentTypes))
104
+ ? use_default_args_macro_prompt
105
+ : "");
106
+ }
107
+
108
+ // FYI: During invocation of a module's `forward()` method, the values live
109
+ // in the `arguments` vector inside this function.
110
+ return torch::unpack<AnyValue, ArgumentTypes...>(
111
+ InvokeForward{module}, CheckedGetter{arguments});
112
+ }
113
+
114
+ std::shared_ptr<Module> ptr() override {
115
+ return module;
116
+ }
117
+
118
+ std::unique_ptr<AnyModulePlaceholder> copy() const override {
119
+ return std::make_unique<AnyModuleHolder>(*this);
120
+ }
121
+
122
+ std::unique_ptr<AnyModulePlaceholder> clone_module(
123
+ optional<Device> device) const override {
124
+ return std::make_unique<AnyModuleHolder>(
125
+ std::dynamic_pointer_cast<ModuleType>(module->clone(device)));
126
+ }
127
+
128
+ /// The actual concrete module instance.
129
+ std::shared_ptr<ModuleType> module;
130
+ };
131
+
132
+ } // namespace nn
133
+ } // namespace torch
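any_module_holder.h supplies the type-erasure machinery behind torch::nn::AnyModule: AnyModuleHolder boxes a concrete module, checks argument count and types at runtime, and unwraps the AnyValue result of forward(). A hedged usage sketch of that erased interface (standard libtorch usage as I understand it, not taken from this commit):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Box a concrete module behind the type-erased AnyModule interface.
  torch::nn::AnyModule any(torch::nn::Linear(3, 4));

  // forward() goes through AnyModuleHolder::forward(), which validates the
  // argument count/types at runtime and unwraps the AnyValue result.
  // The return type defaults to torch::Tensor.
  torch::Tensor out = any.forward(torch::ones({2, 3}));
  std::cout << out.sizes() << "\n";
}
```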
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h ADDED
@@ -0,0 +1,148 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/pimpl.h>
5
+ #include <torch/ordered_dict.h>
6
+ #include <utility>
7
+ #include <vector>
8
+
9
+ namespace torch {
10
+ namespace nn {
11
+
12
+ class ParameterDictImpl : public Cloneable<ParameterDictImpl> {
13
+ public:
14
+ using Iterator = OrderedDict<std::string, Tensor>::Iterator;
15
+ using ConstIterator = OrderedDict<std::string, Tensor>::ConstIterator;
16
+
17
+ ParameterDictImpl() = default;
18
+
19
+ explicit ParameterDictImpl(
20
+ const torch::OrderedDict<std::string, torch::Tensor>& params) {
21
+ parameters_ = params;
22
+ }
23
+
24
+ /// `reset()` is empty for `ParameterDict`, since it does not have
25
+ /// parameters of its own.
26
+ void reset() override {}
27
+
28
+ /// Pretty prints the `ParameterDict` module into the given `stream`.
29
+ void pretty_print(std::ostream& stream) const override {
30
+ stream << "torch::nn::ParameterDict(" << std::endl;
31
+ for (const auto& pair : parameters_) {
32
+ stream << "(" << pair.key() << ")"
33
+ << ": Parameter containing: [" << pair.value().scalar_type()
34
+ << " of size " << pair.value().sizes() << "]";
35
+ ;
36
+ stream << std::endl;
37
+ }
38
+ stream << ")";
39
+ }
40
+
41
+ /// Insert the parameter along with the key into ParameterDict
42
+ /// The parameter is set to be require grad by default
43
+ Tensor& insert(std::string key, Tensor param) {
44
+ bool requires_grad = param.requires_grad();
45
+ return register_parameter(std::move(key), std::move(param), requires_grad);
46
+ }
47
+
48
+ /// Remove key from the ParameterDict and return its value, throw exception
49
+ /// if the key is not contained. Please check contains(key) before for a
50
+ /// non-throwing access.
51
+ Tensor pop(const std::string& key) {
52
+ torch::Tensor v = parameters_[key];
53
+ parameters_.erase(key);
54
+ return v;
55
+ }
56
+
57
+ /// Return the keys in the dict
58
+ ::std::vector<std::string> keys() const {
59
+ return parameters_.keys();
60
+ }
61
+
62
+ /// Return the Values in the dict
63
+ ::std::vector<torch::Tensor> values() const {
64
+ return parameters_.values();
65
+ }
66
+
67
+ /// Return an iterator to the start of ParameterDict
68
+ Iterator begin() {
69
+ return parameters_.begin();
70
+ }
71
+
72
+ /// Return a const iterator to the start of ParameterDict
73
+ ConstIterator begin() const {
74
+ return parameters_.begin();
75
+ }
76
+
77
+ /// Return an iterator to the end of ParameterDict
78
+ Iterator end() {
79
+ return parameters_.end();
80
+ }
81
+
82
+ /// Return a const iterator to the end of ParameterDict
83
+ ConstIterator end() const {
84
+ return parameters_.end();
85
+ }
86
+
87
+ /// Return the number of items currently stored in the ParameterDict
88
+ size_t size() const noexcept {
89
+ return parameters_.size();
90
+ }
91
+
92
+ /// Return true if the ParameterDict is empty, otherwise return false
93
+ bool empty() const noexcept {
94
+ return parameters_.is_empty();
95
+ }
96
+
97
+ /// Update the ParameterDict with the key-value pairs from
98
+ /// another ParameterDict, overwriting existing key
99
+ template <typename Container>
100
+ void update(const Container& container) {
101
+ for (auto& item : container) {
102
+ parameters_[item.key()] = item.value();
103
+ }
104
+ }
105
+
106
+ /// Remove all parameters in the ParameterDict
107
+ void clear() {
108
+ parameters_.clear();
109
+ }
110
+
111
+ /// Check if the centain parameter with the key in the ParameterDict
112
+ bool contains(const std::string& key) const noexcept {
113
+ return parameters_.contains(key);
114
+ }
115
+
116
+ /// Returns the value associated with the given `key`. Throws an exception if
117
+ /// no such key is stored in the `ParameterDict`. Check contains(key) before
118
+ /// for a non-throwing way of access
119
+ const Tensor& get(const std::string& key) const {
120
+ return parameters_[key];
121
+ }
122
+
123
+ /// Returns the value associated with the given `key`. Throws an exception if
124
+ /// no such key is stored in the `ParameterDict`. Check contains(key) before
125
+ /// for a non-throwing way of access
126
+ Tensor& get(const std::string& key) {
127
+ return parameters_[key];
128
+ }
129
+
130
+ /// Returns the value associated with the given `key`. Throws an exception if
131
+ /// no such key is stored in the `ParameterDict`. Check contains(key) before
132
+ /// for a non-throwing way of access
133
+ Tensor& operator[](const std::string& key) {
134
+ return parameters_[key];
135
+ }
136
+
137
+ /// Returns the value associated with the given `key`. Throws an exception if
138
+ /// no such key is stored in the `ParameterDict`. Check contains(key) before
139
+ /// for a non-throwing way of access
140
+ const Tensor& operator[](const std::string& key) const {
141
+ return parameters_[key];
142
+ }
143
+ };
144
+
145
+ TORCH_MODULE(ParameterDict);
146
+
147
+ } // namespace nn
148
+ } // namespace torch
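parameterdict.h declares a keyed container whose entries are registered parameters: insert() registers a tensor under a string key (preserving its requires_grad flag), while get()/operator[] retrieve it and contains() guards against throwing lookups. A minimal usage sketch built only from the methods declared above (illustrative, not part of the commit):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::ParameterDict dict;

  // insert() registers each tensor as a named parameter of the module.
  dict->insert("weight", torch::randn({4, 4}, torch::requires_grad()));
  dict->insert("bias", torch::zeros(4));

  // contains() allows non-throwing access before get()/operator[].
  if (dict->contains("weight")) {
    std::cout << "entries: " << dict->size()
              << ", weight shape: " << dict->get("weight").sizes() << "\n";
  }
}
```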
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h ADDED
@@ -0,0 +1,169 @@
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/module.h>
5
+
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+ class ParameterListImpl : public Cloneable<ParameterListImpl> {
11
+ public:
12
+ using Iterator = typename std::vector<
13
+ OrderedDict<std::string, torch::Tensor>::Item>::iterator;
14
+ using ConstIterator = typename std::vector<
15
+ OrderedDict<std::string, torch::Tensor>::Item>::const_iterator;
16
+
17
+ ParameterListImpl() = default;
18
+
19
+ /// Constructs the `ParameterList` from a variadic list of ParameterList.
20
+ template <typename... Tensors>
21
+ explicit ParameterListImpl(Tensors&&... params) {
22
+ parameters_.reserve(sizeof...(Tensors));
23
+ push_back_var(std::forward<Tensors>(params)...);
24
+ }
25
+
26
+ template <typename... Tensors>
27
+ explicit ParameterListImpl(const Tensors&... params) {
28
+ parameters_.reserve(sizeof...(Tensors));
29
+ push_back_var(std::forward<Tensors>(params)...);
30
+ }
31
+
32
+ /// `reset()` is empty for `ParameterList`, since it does not have parameters
33
+ /// of its own.
34
+ void reset() override {}
35
+
36
+ /// Pretty prints the `ParameterList` module into the given `stream`.
37
+ void pretty_print(std::ostream& stream) const override {
38
+ stream << "torch::nn::ParameterList(" << std::endl;
39
+ for (const auto& pair : parameters_) {
40
+ stream << "(" << pair.key() << ")"
41
+ << ": Parameter containing: [" << pair.value().scalar_type()
42
+ << " of size " << pair.value().sizes() << "]";
43
+ ;
44
+ stream << std::endl;
45
+ }
46
+ stream << ")";
47
+ }
48
+
49
+ /// push the a given parameter at the end of the list
50
+ void append(torch::Tensor&& param) {
51
+ bool requires_grad = param.requires_grad();
52
+ register_parameter(
53
+ c10::to_string(parameters_.size()), std::move(param), requires_grad);
54
+ }
55
+
56
+ /// push the a given parameter at the end of the list
57
+ void append(const torch::Tensor& param) {
58
+ bool requires_grad = param.requires_grad();
59
+ register_parameter(
60
+ c10::to_string(parameters_.size()), param, requires_grad);
61
+ }
62
+
63
+ /// push the a given parameter at the end of the list
64
+ /// And the key of the pair will be discarded, only the value
65
+ /// will be added into the `ParameterList`
66
+ void append(const OrderedDict<std::string, torch::Tensor>::Item& pair) {
67
+ register_parameter(
68
+ c10::to_string(parameters_.size()),
69
+ pair.value(),
70
+ pair.value().requires_grad());
71
+ }
72
+
73
+ /// extend parameters from a container to the end of the list
74
+ template <typename Container>
75
+ void extend(const Container& container) {
76
+ for (const auto& param : container) {
77
+ append(param);
78
+ }
79
+ }
80
+
81
+ /// Returns an iterator to the start of the ParameterList
82
+ /// the iterator returned will be type of `OrderedDict<std::string,
83
+ /// torch::Tensor>::Item`
84
+ Iterator begin() {
85
+ return parameters_.begin();
86
+ }
87
+
88
+ /// Returns a const iterator to the start of the ParameterList
89
+ /// the iterator returned will be type of `OrderedDict<std::string,
90
+ /// torch::Tensor>::Item`
91
+ ConstIterator begin() const {
92
+ return parameters_.begin();
93
+ }
94
+
95
+ /// Returns an iterator to the end of the ParameterList
96
+ /// the iterator returned will be type of `OrderedDict<std::string,
97
+ /// torch::Tensor>::Item`
98
+ Iterator end() {
99
+ return parameters_.end();
100
+ }
101
+
102
+ /// Returns a const iterator to the end of the ParameterList
103
+ /// the iterator returned will be type of `OrderedDict<std::string,
104
+ /// torch::Tensor>::Item`
105
+ ConstIterator end() const {
106
+ return parameters_.end();
107
+ }
108
+
109
+ /// Returns the value associated with the given `key`. Throws an exception if
110
+ /// no such key is stored in the `ParameterList`. Check contains(key) before
111
+ /// for a non-throwing way of access
112
+ at::Tensor& at(size_t idx) {
113
+ TORCH_CHECK(idx < size(), "Index out of range");
114
+ return parameters_[c10::to_string(idx)];
115
+ }
116
+
117
+ /// Returns the value associated with the given `key`. Throws an exception if
118
+ /// no such key is stored in the `ParameterList`. Check contains(key) before
119
+ /// for a non-throwing way of access
120
+ const at::Tensor& at(size_t idx) const {
121
+ TORCH_CHECK(idx < size(), "Index out of range");
122
+ return parameters_[c10::to_string(idx)];
123
+ }
124
+
125
+ /// Returns the value associated with the given `key`. Throws an exception if
126
+ /// no such key is stored in the `ParameterList`. Check contains(key) before
127
+ /// for a non-throwing way of access
128
+ at::Tensor& operator[](size_t idx) {
129
+ return at(idx);
130
+ }
131
+
132
+ /// Returns the value associated with the given `key`. Throws an exception if
133
+ /// no such key is stored in the `ParameterList`. Check contains(key) before
134
+ /// for a non-throwing way of access
135
+ const at::Tensor& operator[](size_t idx) const {
136
+ return at(idx);
137
+ }
138
+
139
+ /// Return the size of the ParameterList
140
+ size_t size() const noexcept {
141
+ return parameters_.size();
142
+ }
143
+ /// True if the ParameterList is empty
144
+ bool is_empty() const noexcept {
145
+ return parameters_.is_empty();
146
+ }
147
+
148
+ /// Overload the +=, so that two ParameterList could be incrementally added
149
+ template <typename Container>
150
+ Container& operator+=(const Container& other) {
151
+ extend(other);
152
+ return *this;
153
+ }
154
+
155
+ private:
156
+ template <typename Head, typename... Tail>
157
+ void push_back_var(Head&& head, Tail&&... tail) {
158
+ append(std::forward<Head>(head));
159
+ // Recursively calls this method, until the parameter pack only thas this
160
+ // entry left. Then calls `push_back()` a final time (above).
161
+ push_back_var(std::forward<Tail>(tail)...);
162
+ }
163
+
164
+ /// The base case, when the list of modules is empty.
165
+ void push_back_var() {}
166
+ };
167
+ TORCH_MODULE(ParameterList);
168
+ } // namespace nn
169
+ } // namespace torch
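parameterlist.h is the positional counterpart: append() registers each tensor under its index ("0", "1", ...), and at()/operator[] give bounds-checked access. A short illustrative sketch using only the methods declared above (not part of the commit):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::ParameterList params;

  // Each append() registers the tensor as a parameter named "0", "1", ...
  params->append(torch::randn({2, 2}, torch::requires_grad()));
  params->append(torch::zeros(5));

  // at() is bounds-checked and throws on an out-of-range index.
  for (size_t i = 0; i < params->size(); ++i) {
    std::cout << i << ": " << params->at(i).sizes() << "\n";
  }
}
```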
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/sequential.h ADDED
@@ -0,0 +1,390 @@
1
+ #pragma once
2
+
3
+ #include <torch/detail/static.h>
4
+ #include <torch/nn/cloneable.h>
5
+ #include <torch/nn/module.h>
6
+ #include <torch/nn/modules/container/any.h>
7
+ #include <torch/nn/modules/container/named_any.h>
8
+ #include <torch/nn/pimpl.h>
9
+ #include <torch/types.h>
10
+
11
+ #include <c10/util/Exception.h>
12
+
13
+ #include <cstdint>
14
+ #include <memory>
15
+ #include <ostream>
16
+ #include <string>
17
+ #include <type_traits>
18
+ #include <utility>
19
+ #include <vector>
20
+
21
+ namespace torch {
22
+ namespace nn {
23
+
24
+ /// A list of `Module`s that acts as a `Module` itself.
25
+ ///
26
+ /// A `Sequential` is fundamentally a list of `Module`s, each with a `forward()`
27
+ /// method. `Sequential` provides a `forward()` method of its own, which accepts
28
+ /// any input and forwards it to the first module it stores. It then "chains"
29
+ /// outputs to inputs sequentially for each subsequent module, finally returning
30
+ /// the output of the last module. For example:
31
+ ///
32
+ /// \rst
33
+ /// .. code-block:: cpp
34
+ ///
35
+ /// torch::nn::Sequential seq(
36
+ /// torch::nn::Linear(3, 4),
37
+ /// torch::nn::BatchNorm1d(4),
38
+ /// torch::nn::Dropout(0.5)
39
+ /// );
40
+ ///
41
+ /// auto output = seq->forward(torch::ones(3));
42
+ ///
43
+ /// \endrst
44
+ ///
45
+ /// This can conceptually be thought of as the following loop (using Python as
46
+ /// pseudocode):
47
+ ///
48
+ /// \rst
49
+ /// .. code-block:: python
50
+ ///
51
+ /// def forward(sequential, input):
52
+ /// for module in sequential:
53
+ /// input = module(input)
54
+ /// return input
55
+ ///
56
+ /// \endrst
57
+ ///
58
+ /// Why should you use `Sequential` instead of a simple `std::vector`? The value
59
+ /// a `Sequential` provides over manually calling a sequence of modules is that
60
+ /// it allows treating the whole container *as a single module*, such that
61
+ /// performing a transformation on the `Sequential` applies to each of the
62
+ /// modules it stores (which are each a registered submodule of the
63
+ /// `Sequential`). For example, calling
64
+ /// `.to(torch::kCUDA)` on a `Sequential` will move each module in the list to
65
+ /// CUDA memory. For example:
66
+ ///
67
+ /// \rst
68
+ /// .. code-block:: cpp
69
+ ///
70
+ /// torch::nn::Sequential seq(
71
+ /// torch::nn::Linear(3, 4),
72
+ /// torch::nn::BatchNorm1d(4),
73
+ /// torch::nn::Dropout(0.5)
74
+ /// );
75
+ ///
76
+ /// // Convert all modules to CUDA.
77
+ /// seq->to(torch::kCUDA);
78
+ ///
79
+ /// \endrst
80
+ ///
81
+ /// Finally, `Sequential` provides a lightweight container API, such as allowing
82
+ /// iteration over submodules, positional access, adding a new module after
83
+ /// construction via `push_back`, as well as joining two `Sequential`s via
84
+ /// `extend`.
85
+ ///
86
+ /// \rst
87
+ /// .. attention::
88
+ /// One current limitation of `Sequential` is that all except the first module
89
+ /// must accept a single argument. If your modules need to take multiple
90
+ /// arguments, you should define them to take and return tuples.
91
+ /// \endrst
92
+ class SequentialImpl : public Cloneable<SequentialImpl> {
93
+ public:
94
+ using Iterator = std::vector<AnyModule>::iterator;
95
+ using ConstIterator = std::vector<AnyModule>::const_iterator;
96
+
97
+ SequentialImpl() = default;
98
+
99
+ /// Constructs the `Sequential` from a variadic list of modules.
100
+ template <typename... Modules>
101
+ explicit SequentialImpl(Modules&&... modules) {
102
+ modules_.reserve(sizeof...(Modules));
103
+ push_back(std::forward<Modules>(modules)...);
104
+ }
105
+
106
+ /// Constructs the `Sequential` from an `OrderedDict` of named `AnyModule`s.
107
+ explicit SequentialImpl(
108
+ torch::OrderedDict<std::string, AnyModule>&& ordered_dict) {
109
+ modules_.reserve(ordered_dict.size());
110
+ for (auto& item : ordered_dict) {
111
+ push_back(item.key(), std::move(item.value()));
112
+ }
113
+ }
114
+
115
+ /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s.
116
+ /// It enables the following use case:
117
+ /// `Sequential sequential({{"m1", M(1)}, {"m2", M(2)}})`
118
+ explicit SequentialImpl(std::initializer_list<NamedAnyModule> named_modules) {
119
+ modules_.reserve(named_modules.size());
120
+ for (const auto& named_module : named_modules) {
121
+ push_back(named_module.name(), named_module.module());
122
+ }
123
+ }
124
+
125
+ /// Special cloning function for `Sequential` because it does not use
126
+ /// `reset()`.
127
+ std::shared_ptr<Module> clone(
128
+ const optional<Device>& device = nullopt) const override {
129
+ auto clone = std::make_shared<SequentialImpl>();
130
+ for (const auto& module : modules_) {
131
+ clone->push_back(module.clone(device));
132
+ }
133
+ return clone;
134
+ }
135
+
136
+ /// `reset()` is empty for `Sequential`, since it does not have parameters of
137
+ /// its own.
138
+ void reset() override {}
139
+
140
+ /// Pretty prints the `Sequential` module into the given `stream`.
141
+ void pretty_print(std::ostream& stream) const override {
142
+ stream << "torch::nn::Sequential";
143
+ }
144
+
145
+ /// Feeds `inputs` to the first module and then chains outputs to inputs,
146
+ /// returning the last output.
147
+ ///
148
+ /// Conceptually the following loop in Python:
149
+ ///
150
+ /// \rst
151
+ /// .. code-block:: python
152
+ ///
153
+ /// def forward(sequential, input):
154
+ /// for module in sequential:
155
+ /// input = module(input)
156
+ /// return input
157
+ ///
158
+ /// \endrst
159
+ ///
160
+ /// The return type is taken as the first template parameter. It defaults to
161
+ /// `Tensor`. If the last module in the `Sequential` returns another type `T`,
162
+ /// you should call `forward<T>(inputs)` instead of just `forward(inputs)`:
163
+ ///
164
+ /// \rst
165
+ /// .. code-block:: cpp
166
+ ///
167
+ /// torch::Tensor tensor = sequential1->forward(inputs);
168
+ /// int integer = sequential2->forward<int>(inputs);
169
+ /// float value = sequential3->forward<float>(inputs);
170
+ ///
171
+ /// \endrst
172
+ template <typename ReturnType = Tensor, typename... InputTypes>
173
+ ReturnType forward(InputTypes&&... inputs) {
174
+ TORCH_CHECK(!is_empty(), "Cannot call forward() on an empty Sequential");
175
+
176
+ auto iterator = modules_.begin();
177
+ auto input = iterator->any_forward(std::forward<InputTypes>(inputs)...);
178
+
179
+ for (++iterator; iterator != modules_.end(); ++iterator) {
180
+ input = iterator->any_forward(std::move(input));
181
+ }
182
+
183
+ // Check the return value and give a nice error message if the requested
184
+ // return type was incorrect.
185
+ if (auto* return_value = input.template try_get<ReturnType>()) {
186
+ return std::move(*return_value);
187
+ }
188
+ AT_ERROR(
189
+ "The type of the return value is ",
190
+ c10::demangle(input.type_info().name()),
191
+ ", but you asked for type ",
192
+ c10::demangle(typeid(ReturnType).name()));
193
+ }
194
+
195
+ /// Adds a new (boxed) `Module` to the `Sequential` container.
196
+ template <typename ModuleType>
197
+ void push_back(std::shared_ptr<ModuleType> module_ptr) {
198
+ push_back(c10::to_string(modules_.size()), std::move(module_ptr));
199
+ }
200
+
201
+ /// Adds a new named (boxed) `Module` to the `Sequential` container.
202
+ template <typename ModuleType>
203
+ void push_back(std::string name, std::shared_ptr<ModuleType> module_ptr) {
204
+ push_back(std::move(name), AnyModule(std::move(module_ptr)));
205
+ }
206
+
207
+ /// Adds a new `Module` to the `Sequential` container, moving or copying it
208
+ /// into a `shared_ptr` internally. This method allows passing value types,
209
+ /// and letting the container deal with the boxing. This means you can write
210
+ /// `Sequential(Module(3, 4))` instead of
211
+ /// `Sequential(std::make_shared<Module>(3, 4))`.
212
+ template <typename M, typename = torch::detail::enable_if_module_t<M>>
213
+ void push_back(M&& module) {
214
+ push_back(c10::to_string(modules_.size()), std::forward<M>(module));
215
+ }
216
+
217
+ /// Adds a new named `Module` to the `Sequential` container, moving or copying
218
+ /// it into a `shared_ptr` internally. This method allows passing value types,
219
+ /// and letting the container deal with the boxing.
220
+ template <typename M, typename = torch::detail::enable_if_module_t<M>>
221
+ void push_back(std::string name, M&& module) {
222
+ using Type = typename std::remove_reference<M>::type;
223
+ push_back(std::move(name), std::make_shared<Type>(std::forward<M>(module)));
224
+ }
225
+
226
+ /// Unwraps the contained module of a `ModuleHolder` and adds it to the
227
+ /// `Sequential`.
228
+ template <typename M>
229
+ void push_back(const ModuleHolder<M>& module_holder) {
230
+ push_back(c10::to_string(modules_.size()), module_holder);
231
+ }
232
+
233
+ /// Unwraps the contained named module of a `ModuleHolder` and adds it to the
234
+ /// `Sequential`.
235
+ template <typename M>
236
+ void push_back(std::string name, const ModuleHolder<M>& module_holder) {
237
+ push_back(std::move(name), module_holder.ptr());
238
+ }
239
+
240
+ /// Iterates over the container and calls `push_back()` on each value.
241
+ template <typename Container>
242
+ void extend(const Container& container) {
243
+ for (const auto& module : container) {
244
+ push_back(module);
245
+ }
246
+ }
247
+
248
+ /// Adds a type-erased `AnyModule` to the `Sequential`.
249
+ void push_back(AnyModule any_module) {
250
+ push_back(c10::to_string(modules_.size()), std::move(any_module));
251
+ }
252
+
253
+ void push_back(std::string name, AnyModule any_module) {
254
+ modules_.push_back(std::move(any_module));
255
+ const auto index = modules_.size() - 1;
256
+ register_module(std::move(name), modules_[index].ptr());
257
+ }
258
+
259
+ /// Returns an iterator to the start of the `Sequential`.
260
+ Iterator begin() {
261
+ return modules_.begin();
262
+ }
263
+
264
+ /// Returns a const iterator to the start of the `Sequential`.
265
+ ConstIterator begin() const {
266
+ return modules_.begin();
267
+ }
268
+
269
+ /// Returns an iterator to the end of the `Sequential`.
270
+ Iterator end() {
271
+ return modules_.end();
272
+ }
273
+
274
+ /// Returns a const iterator to the end of the `Sequential`.
275
+ ConstIterator end() const {
276
+ return modules_.end();
277
+ }
278
+
279
+ /// Attempts to return the module at the given index as the requested type.
280
+ /// Throws an exception if the index is out of bounds or the types do not
281
+ /// match.
282
+ template <typename T>
283
+ T& at(size_t index) {
284
+ static_assert(
285
+ torch::detail::is_module<T>::value,
286
+ "Can only call Sequential::at with an nn::Module type");
287
+ TORCH_CHECK(index < size(), "Index out of range");
288
+ return modules_[index].get<T>();
289
+ }
290
+
291
+ /// Attempts to return the module at the given index as the requested type.
292
+ /// Throws an exception if the index is out of bounds or the types do not
293
+ /// match.
294
+ template <typename T>
295
+ const T& at(size_t index) const {
296
+ static_assert(
297
+ torch::detail::is_module<T>::value,
298
+ "Can only call Sequential::at with an nn::Module type");
299
+ TORCH_CHECK(index < size(), "Index out of range");
300
+ return modules_[index].get<T>();
301
+ }
302
+
303
+ /// Attempts to return a `std::shared_ptr` whose dynamic type is that of the
304
+ /// underlying module at the given index. Throws an exception if the index is
305
+ /// out of bounds.
306
+ std::shared_ptr<Module> ptr(size_t index) const {
307
+ TORCH_CHECK(index < size(), "Index out of range");
308
+ return modules_[index].ptr();
309
+ }
310
+
311
+ /// Attempts to return a `std::shared_ptr` whose type is the one provided.
312
+ /// Throws an exception if the index is out of bounds or the types do not
313
+ /// match.
314
+ template <typename T>
315
+ std::shared_ptr<T> ptr(size_t index) const {
316
+ static_assert(
317
+ torch::detail::is_module<T>::value,
318
+ "Can only call Sequential::ptr with an nn::Module type");
319
+ TORCH_CHECK(index < size(), "Index out of range");
320
+ return modules_[index].ptr<T>();
321
+ }
322
+
323
+ /// Like `ptr(index)`.
324
+ std::shared_ptr<Module> operator[](size_t index) const {
325
+ // This is the only method we can call without a type.
326
+ return ptr(index);
327
+ }
328
+
329
+ /// The current size of the `Sequential` container.
330
+ size_t size() const noexcept {
331
+ return modules_.size();
332
+ }
333
+
334
+ /// True if there are no modules in the `Sequential`.
335
+ bool is_empty() const noexcept {
336
+ return size() == 0;
337
+ }
338
+
339
+ private:
340
+ /// Takes a First *and* Second parameter, to avoid ambiguity when a parameter
341
+ /// pack has only one type, in which case the template would be preferred,
342
+ /// even if the other `push_back` functions are better fits (e.g. `unique_ptr`
343
+ /// -> `shared_ptr` overload).
344
+ /// NOTE: We explicitly avoid matching this template with
345
+ /// `push_back(std::string("name"), module)` or `push_back("name", module)`,
346
+ /// since they should be handled by their respective `push_back` functions.
347
+ template <
348
+ typename First,
349
+ typename Second,
350
+ typename... Rest,
351
+ typename = torch::disable_if_t<
352
+ std::is_same<First, std::string>::value ||
353
+ // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
354
+ std::is_same<
355
+ typename std::decay<First>::type,
356
+ std::decay<const char (&)[]>::type>::value>>
357
+ void push_back(First&& first, Second&& second, Rest&&... rest) {
358
+ push_back(std::forward<First>(first));
359
+ // Recursively calls this method, until the parameter pack only thas this
360
+ // entry left. Then calls `push_back()` a final time (above).
361
+ push_back(std::forward<Second>(second), std::forward<Rest>(rest)...);
362
+ }
363
+
364
+ /// The base case, when the list of modules is empty.
365
+ void push_back() {}
366
+
367
+ // Box the AnyModules to give Sequential reference semantics, like the rest of
368
+ // the API. Note that this is not required otherwise, this could just be a
369
+ // `vector<AnyModule>`.
370
+ std::vector<AnyModule> modules_;
371
+ };
372
+
373
+ /// A `ModuleHolder` subclass for `SequentialImpl`.
374
+ /// See the documentation for `SequentialImpl` class to learn what methods it
375
+ /// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
376
+ /// module storage semantics.
377
+ class Sequential : public torch::nn::ModuleHolder<SequentialImpl> {
378
+ public:
379
+ using torch::nn::ModuleHolder<SequentialImpl>::ModuleHolder;
380
+
381
+ Sequential() : ModuleHolder() {}
382
+
383
+ /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s.
384
+ /// It enables the following use case:
385
+ /// `Sequential sequential({{"m1", M(1)}, {"m2", M(2)}})`
386
+ Sequential(std::initializer_list<NamedAnyModule> named_modules)
387
+ : ModuleHolder(std::make_shared<SequentialImpl>(named_modules)) {}
388
+ };
389
+ } // namespace nn
390
+ } // namespace torch
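The Sequential documentation above already shows construction and chained forward(); the sketch below (illustrative, not part of the commit) additionally exercises push_back() with a named submodule after construction, which the header also declares:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::Sequential seq(
      torch::nn::Linear(8, 16),
      torch::nn::ReLU());

  // push_back() keeps registering submodules after construction,
  // here under an explicit name instead of the default index.
  seq->push_back("head", torch::nn::Linear(16, 2));

  // forward() chains each module's output into the next; the return
  // type defaults to torch::Tensor.
  torch::Tensor out = seq->forward(torch::randn({4, 8}));
  std::cout << out.sizes() << "\n";
}
```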
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h ADDED
@@ -0,0 +1,714 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ /// Options for the `ELU` module.
12
+ ///
13
+ /// Example:
14
+ /// ```
15
+ /// ELU model(ELUOptions().alpha(42.42).inplace(true));
16
+ /// ```
17
+ struct TORCH_API ELUOptions {
18
+ /// The `alpha` value for the ELU formulation. Default: 1.0
19
+ TORCH_ARG(double, alpha) = 1.0;
20
+
21
+ /// can optionally do the operation in-place. Default: False
22
+ TORCH_ARG(bool, inplace) = false;
23
+ };
24
+
25
+ namespace functional {
26
+ /// Options for `torch::nn::functional::elu`.
27
+ ///
28
+ /// See the documentation for `torch::nn::ELUOptions` class to learn what
29
+ /// arguments are supported.
30
+ ///
31
+ /// Example:
32
+ /// ```
33
+ /// namespace F = torch::nn::functional;
34
+ /// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
35
+ /// ```
36
+ using ELUFuncOptions = ELUOptions;
37
+ } // namespace functional
38
+
39
+ // ============================================================================
40
+
41
+ /// Options for the `SELU` module.
42
+ ///
43
+ /// Example:
44
+ /// ```
45
+ /// SELU model(SELUOptions().inplace(true));
46
+ /// ```
47
+ struct TORCH_API SELUOptions {
48
+ /* implicit */ SELUOptions(bool inplace = false);
49
+
50
+ /// can optionally do the operation in-place. Default: False
51
+ TORCH_ARG(bool, inplace);
52
+ };
53
+
54
+ namespace functional {
55
+ /// Options for `torch::nn::functional::selu`.
56
+ ///
57
+ /// See the documentation for `torch::nn::SELUOptions` class to learn what
58
+ /// arguments are supported.
59
+ ///
60
+ /// Example:
61
+ /// ```
62
+ /// namespace F = torch::nn::functional;
63
+ /// F::selu(input, F::SELUFuncOptions(false));
64
+ /// ```
65
+ using SELUFuncOptions = SELUOptions;
66
+ } // namespace functional
67
+
68
+ // ============================================================================
69
+
70
+ /// Options for the `GLU` module.
71
+ ///
72
+ /// Example:
73
+ /// ```
74
+ /// GLU model(GLUOptions(1));
75
+ /// ```
76
+ struct TORCH_API GLUOptions {
77
+ /* implicit */ GLUOptions(int64_t dim = -1);
78
+
79
+ /// the dimension on which to split the input. Default: -1
80
+ TORCH_ARG(int64_t, dim);
81
+ };
82
+
83
+ namespace functional {
84
+ /// Options for `torch::nn::functional::glu`.
85
+ ///
86
+ /// See the documentation for `torch::nn::GLUOptions` class to learn what
87
+ /// arguments are supported.
88
+ ///
89
+ /// Example:
90
+ /// ```
91
+ /// namespace F = torch::nn::functional;
92
+ /// F::glu(input, GLUFuncOptions(1));
93
+ /// ```
94
+ using GLUFuncOptions = GLUOptions;
95
+ } // namespace functional
96
+
97
+ // ============================================================================
98
+
99
+ /// Options for the `GELU` module.
100
+ ///
101
+ /// Example:
102
+ /// ```
103
+ /// GELU model(GELUOptions().approximate("none"));
104
+ /// ```
105
+ struct TORCH_API GELUOptions {
106
+ /// Specifies the approximation to apply to the output.
107
+ TORCH_ARG(std::string, approximate) = "none";
108
+ };
109
+
110
+ namespace functional {
111
+ /// Options for `torch::nn::functional::gelu`.
112
+ ///
113
+ /// See the documentation for `torch::nn::GELUOptions` class to learn what
114
+ /// arguments are supported.
115
+ ///
116
+ /// Example:
117
+ /// ```
118
+ /// namespace F = torch::nn::functional;
119
+ /// F::gelu(input, F::GELUFuncOptions().approximate("none"));
120
+ /// ```
121
+ using GELUFuncOptions = GELUOptions;
122
+ } // namespace functional
123
+
124
+ // ============================================================================
125
+
126
+ /// Options for the `Hardshrink` module.
127
+ ///
128
+ /// Example:
129
+ /// ```
130
+ /// Hardshrink model(HardshrinkOptions().lambda(42.42));
131
+ /// ```
132
+ struct TORCH_API HardshrinkOptions {
133
+ /* implicit */ HardshrinkOptions(double lambda = 0.5);
134
+
135
+ /// the `lambda` value for the Hardshrink formulation. Default: 0.5
136
+ TORCH_ARG(double, lambda);
137
+ };
138
+
139
+ namespace functional {
140
+ /// Options for `torch::nn::functional::hardshrink`.
141
+ ///
142
+ /// See the documentation for `torch::nn::HardshrinkOptions` class to learn what
143
+ /// arguments are supported.
144
+ ///
145
+ /// Example:
146
+ /// ```
147
+ /// namespace F = torch::nn::functional;
148
+ /// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
149
+ /// ```
150
+ using HardshrinkFuncOptions = HardshrinkOptions;
151
+ } // namespace functional
152
+
153
+ // ============================================================================
154
+
155
+ /// Options for the `Hardtanh` module.
156
+ ///
157
+ /// Example:
158
+ /// ```
159
+ /// Hardtanh
160
+ /// model(HardtanhOptions().min_val(-42.42).max_val(0.42).inplace(true));
161
+ /// ```
162
+ struct TORCH_API HardtanhOptions {
163
+ /// minimum value of the linear region range. Default: -1
164
+ TORCH_ARG(double, min_val) = -1.0;
165
+
166
+ /// maximum value of the linear region range. Default: 1
167
+ TORCH_ARG(double, max_val) = 1.0;
168
+
169
+ /// can optionally do the operation in-place. Default: False
170
+ TORCH_ARG(bool, inplace) = false;
171
+ };
172
+
173
+ namespace functional {
174
+ /// Options for `torch::nn::functional::hardtanh`.
175
+ ///
176
+ /// See the documentation for `torch::nn::HardtanhOptions` class to learn what
177
+ /// arguments are supported.
178
+ ///
179
+ /// Example:
180
+ /// ```
181
+ /// namespace F = torch::nn::functional;
182
+ /// F::hardtanh(x,
183
+ /// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
184
+ /// ```
185
+ using HardtanhFuncOptions = HardtanhOptions;
186
+ } // namespace functional
187
+
188
+ // ============================================================================
189
+
190
+ /// Options for the `LeakyReLU` module.
191
+ ///
192
+ /// Example:
193
+ /// ```
194
+ /// LeakyReLU model(LeakyReLUOptions().negative_slope(0.42).inplace(true));
195
+ /// ```
196
+ struct TORCH_API LeakyReLUOptions {
197
+ /// Controls the angle of the negative slope. Default: 1e-2
198
+ TORCH_ARG(double, negative_slope) = 1e-2;
199
+
200
+ /// can optionally do the operation in-place. Default: False
201
+ TORCH_ARG(bool, inplace) = false;
202
+ };
203
+
204
+ namespace functional {
205
+ /// Options for `torch::nn::functional::leaky_relu`.
206
+ ///
207
+ /// See the documentation for `torch::nn::LeakyReLUOptions` class to learn what
208
+ /// arguments are supported.
209
+ ///
210
+ /// Example:
211
+ /// ```
212
+ /// namespace F = torch::nn::functional;
213
+ /// F::leaky_relu(x,
214
+ /// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
215
+ /// ```
216
+ using LeakyReLUFuncOptions = LeakyReLUOptions;
217
+ } // namespace functional
218
+
219
+ // ============================================================================
220
+
221
+ /// Options for the `Softmax` module.
222
+ ///
223
+ /// Example:
224
+ /// ```
225
+ /// Softmax model(SoftmaxOptions(1));
226
+ /// ```
227
+ struct TORCH_API SoftmaxOptions {
228
+ SoftmaxOptions(int64_t dim);
229
+
230
+ /// Dimension along which Softmax will be computed.
231
+ TORCH_ARG(int64_t, dim);
232
+ };
233
+
234
+ // ============================================================================
235
+
236
+ namespace functional {
237
+
238
+ /// Options for `torch::nn::functional::softmax`.
239
+ ///
240
+ /// Example:
241
+ /// ```
242
+ /// namespace F = torch::nn::functional;
243
+ /// F::softmax(input, F::SoftmaxFuncOptions(1));
244
+ /// ```
245
+ struct TORCH_API SoftmaxFuncOptions {
246
+ SoftmaxFuncOptions(int64_t dim);
247
+
248
+ /// Dimension along which Softmax will be computed.
249
+ TORCH_ARG(int64_t, dim);
250
+
251
+ /// the desired data type of returned tensor.
252
+ /// If specified, the input tensor is casted to `dtype` before the operation
253
+ /// is performed. This is useful for preventing data type overflows. Default:
254
+ /// None.
255
+ TORCH_ARG(c10::optional<torch::Dtype>, dtype) = c10::nullopt;
256
+ };
257
+
258
+ } // namespace functional
259
+
260
+ // ============================================================================
261
+
262
+ /// Options for the `Softmin` module.
263
+ ///
264
+ /// Example:
265
+ /// ```
266
+ /// Softmin model(SoftminOptions(1));
267
+ /// ```
268
+ struct TORCH_API SoftminOptions {
269
+ SoftminOptions(int64_t dim);
270
+
271
+ /// Dimension along which Softmin will be computed.
272
+ TORCH_ARG(int64_t, dim);
273
+ };
274
+
275
+ // ============================================================================
276
+
277
+ namespace functional {
278
+
279
+ /// Options for `torch::nn::functional::softmin`.
280
+ ///
281
+ /// Example:
282
+ /// ```
283
+ /// namespace F = torch::nn::functional;
284
+ /// F::softmin(input, F::SoftminFuncOptions(1));
285
+ /// ```
286
+ struct TORCH_API SoftminFuncOptions {
287
+ SoftminFuncOptions(int64_t dim);
288
+
289
+ /// Dimension along which Softmin will be computed.
290
+ TORCH_ARG(int64_t, dim);
291
+
292
+ /// the desired data type of returned tensor.
293
+ /// If specified, the input tensor is casted to `dtype` before the operation
294
+ /// is performed. This is useful for preventing data type overflows. Default:
295
+ /// None.
296
+ TORCH_ARG(c10::optional<torch::Dtype>, dtype) = c10::nullopt;
297
+ };
298
+
299
+ } // namespace functional
300
+
301
+ // ============================================================================
302
+
303
+ /// Options for the `LogSoftmax` module.
304
+ ///
305
+ /// Example:
306
+ /// ```
307
+ /// LogSoftmax model(LogSoftmaxOptions(1));
308
+ /// ```
309
+ struct TORCH_API LogSoftmaxOptions {
310
+ LogSoftmaxOptions(int64_t dim);
311
+
312
+ /// Dimension along which LogSoftmax will be computed.
313
+ TORCH_ARG(int64_t, dim);
314
+ };
315
+
316
+ // ============================================================================
317
+
318
+ namespace functional {
319
+
320
+ /// Options for `torch::nn::functional::log_softmax`.
321
+ ///
322
+ /// Example:
323
+ /// ```
324
+ /// namespace F = torch::nn::functional;
325
+ /// F::log_softmax(input, LogSoftmaxFuncOptions(1));
326
+ /// ```
327
+ struct TORCH_API LogSoftmaxFuncOptions {
328
+ LogSoftmaxFuncOptions(int64_t dim);
329
+
330
+ /// Dimension along which LogSoftmax will be computed.
331
+ TORCH_ARG(int64_t, dim);
332
+
333
+ /// the desired data type of returned tensor.
334
+ /// If specified, the input tensor is casted to `dtype` before the operation
335
+ /// is performed. This is useful for preventing data type overflows. Default:
336
+ /// None.
337
+ TORCH_ARG(c10::optional<torch::Dtype>, dtype) = c10::nullopt;
338
+ };
339
+
340
+ } // namespace functional
341
+
342
+ // ============================================================================
343
+
344
+ /// Options for the `PReLU` module.
345
+ ///
346
+ /// Example:
347
+ /// ```
348
+ /// PReLU model(PReLUOptions().num_parameters(42));
349
+ /// ```
350
+ struct TORCH_API PReLUOptions {
351
+ /// number of `a` to learn. Although it takes an int as input, there is only
352
+ /// two values are legitimate: 1, or the number of channels at input. Default:
353
+ /// 1
354
+ TORCH_ARG(int64_t, num_parameters) = 1;
355
+
356
+ /// the initial value of `a`. Default: 0.25
357
+ TORCH_ARG(double, init) = 0.25;
358
+ };
359
+
360
+ // ============================================================================
361
+
362
+ /// Options for the `ReLU` module.
363
+ ///
364
+ /// Example:
365
+ /// ```
366
+ /// ReLU model(ReLUOptions().inplace(true));
367
+ /// ```
368
+ struct TORCH_API ReLUOptions {
369
+ /* implicit */ ReLUOptions(bool inplace = false);
370
+
371
+ /// can optionally do the operation in-place. Default: False
372
+ TORCH_ARG(bool, inplace);
373
+ };
374
+
375
+ namespace functional {
376
+ /// Options for `torch::nn::functional::relu`.
377
+ ///
378
+ /// See the documentation for `torch::nn::ReLUOptions` class to learn what
379
+ /// arguments are supported.
380
+ ///
381
+ /// Example:
382
+ /// ```
383
+ /// namespace F = torch::nn::functional;
384
+ /// F::relu(x, F::ReLUFuncOptions().inplace(true));
385
+ /// ```
386
+ using ReLUFuncOptions = ReLUOptions;
387
+ } // namespace functional
388
+
389
+ // ============================================================================
390
+
391
+ /// Options for the `ReLU6` module.
392
+ ///
393
+ /// Example:
394
+ /// ```
395
+ /// ReLU6 model(ReLU6Options().inplace(true));
396
+ /// ```
397
+ struct TORCH_API ReLU6Options {
398
+ /* implicit */ ReLU6Options(bool inplace = false);
399
+
400
+ /// can optionally do the operation in-place. Default: False
401
+ TORCH_ARG(bool, inplace);
402
+ };
403
+
404
+ namespace functional {
405
+ /// Options for `torch::nn::functional::relu6`.
406
+ ///
407
+ /// See the documentation for `torch::nn::ReLU6Options` class to learn what
408
+ /// arguments are supported.
409
+ ///
410
+ /// Example:
411
+ /// ```
412
+ /// namespace F = torch::nn::functional;
413
+ /// F::relu6(x, F::ReLU6FuncOptions().inplace(true));
414
+ /// ```
415
+ using ReLU6FuncOptions = ReLU6Options;
416
+ } // namespace functional
417
+
418
+ // ============================================================================
419
+
420
+ /// Options for the `RReLU` module.
421
+ ///
422
+ /// Example:
423
+ /// ```
424
+ /// RReLU model(RReLUOptions().lower(0.24).upper(0.42).inplace(true));
425
+ /// ```
426
+ struct TORCH_API RReLUOptions {
427
+ /// lower bound of the uniform distribution. Default: 1/8
428
+ TORCH_ARG(double, lower) = 1.0 / 8.0;
429
+
430
+ /// upper bound of the uniform distribution. Default: 1/3
431
+ TORCH_ARG(double, upper) = 1.0 / 3.0;
432
+
433
+ /// can optionally do the operation in-place. Default: False
434
+ TORCH_ARG(bool, inplace) = false;
435
+ };
436
+
437
+ // ============================================================================
438
+
439
+ namespace functional {
440
+
441
+ /// Options for `torch::nn::functional::rrelu`.
442
+ ///
443
+ /// Example:
444
+ /// ```
445
+ /// namespace F = torch::nn::functional;
446
+ /// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true));
447
+ /// ```
448
+ struct TORCH_API RReLUFuncOptions {
449
+ /// lower bound of the uniform distribution. Default: 1/8
450
+ TORCH_ARG(double, lower) = 1.0 / 8.0;
451
+
452
+ /// upper bound of the uniform distribution. Default: 1/3
453
+ TORCH_ARG(double, upper) = 1.0 / 3.0;
454
+
455
+ /// whether the functional is used in training mode, i.e. with a randomized
+ /// negative slope. Default: false
+ TORCH_ARG(bool, training) = false;
456
+
457
+ /// can optionally do the operation in-place. Default: False
458
+ TORCH_ARG(bool, inplace) = false;
459
+ };
460
+
461
+ } // namespace functional
462
+
463
+ // ============================================================================
464
+
465
+ /// Options for the `CELU` module.
466
+ ///
467
+ /// Example:
468
+ /// ```
469
+ /// CELU model(CELUOptions().alpha(42.42).inplace(true));
470
+ /// ```
471
+ struct TORCH_API CELUOptions {
472
+ /// The `alpha` value for the CELU formulation. Default: 1.0
473
+ TORCH_ARG(double, alpha) = 1.0;
474
+
475
+ /// can optionally do the operation in-place. Default: False
476
+ TORCH_ARG(bool, inplace) = false;
477
+ };
478
+
479
+ namespace functional {
480
+ /// Options for `torch::nn::functional::celu`.
481
+ ///
482
+ /// See the documentation for `torch::nn::CELUOptions` class to learn what
483
+ /// arguments are supported.
484
+ ///
485
+ /// Example:
486
+ /// ```
487
+ /// namespace F = torch::nn::functional;
488
+ /// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
489
+ /// ```
490
+ using CELUFuncOptions = CELUOptions;
491
+ } // namespace functional
492
+
493
+ // ============================================================================
494
+
495
+ /// Options for the `Softplus` module.
496
+ ///
497
+ /// Example:
498
+ /// ```
499
+ /// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42));
500
+ /// ```
501
+ struct TORCH_API SoftplusOptions {
502
+ /// the `beta` value for the Softplus formulation. Default: 1
503
+ TORCH_ARG(double, beta) = 1.0;
504
+
505
+ /// values above this revert to a linear function. Default: 20
506
+ TORCH_ARG(double, threshold) = 20.0;
507
+ };
508
+
509
+ namespace functional {
510
+ /// Options for `torch::nn::functional::softplus`.
511
+ ///
512
+ /// See the documentation for `torch::nn::SoftplusOptions` class to learn what
513
+ /// arguments are supported.
514
+ ///
515
+ /// Example:
516
+ /// ```
517
+ /// namespace F = torch::nn::functional;
518
+ /// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
519
+ /// ```
520
+ using SoftplusFuncOptions = SoftplusOptions;
521
+ } // namespace functional
522
+
523
+ // ============================================================================
524
+
525
+ /// Options for the `Softshrink` module.
526
+ ///
527
+ /// Example:
528
+ /// ```
529
+ /// Softshrink model(SoftshrinkOptions(42.42));
530
+ /// ```
531
+ struct TORCH_API SoftshrinkOptions {
532
+ /* implicit */ SoftshrinkOptions(double lambda = 0.5);
533
+
534
+ /// the `lambda` value for the Softshrink formulation. Default: 0.5
535
+ TORCH_ARG(double, lambda);
536
+ };
537
+
538
+ namespace functional {
539
+ /// Options for `torch::nn::functional::softshrink`.
540
+ ///
541
+ /// See the documentation for `torch::nn::SoftshrinkOptions` class to learn what
542
+ /// arguments are supported.
543
+ ///
544
+ /// Example:
545
+ /// ```
546
+ /// namespace F = torch::nn::functional;
547
+ /// F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
548
+ /// ```
549
+ using SoftshrinkFuncOptions = SoftshrinkOptions;
550
+ } // namespace functional
551
+
552
+ // ============================================================================
553
+
554
+ /// Options for the `Threshold` module.
555
+ ///
556
+ /// Example:
557
+ /// ```
558
+ /// Threshold model(ThresholdOptions(42.42, 24.24).inplace(true));
559
+ /// ```
560
+ struct TORCH_API ThresholdOptions {
561
+ ThresholdOptions(double threshold, double value)
562
+ : threshold_(threshold), value_(value) {}
563
+
564
+ /// The value to threshold at
565
+ TORCH_ARG(double, threshold);
566
+
567
+ /// The value to replace with
568
+ TORCH_ARG(double, value);
569
+
570
+ /// can optionally do the operation in-place. Default: False
571
+ TORCH_ARG(bool, inplace) = false;
572
+ };
573
+
574
+ namespace functional {
575
+ /// Options for `torch::nn::functional::threshold`.
576
+ ///
577
+ /// See the documentation for `torch::nn::ThresholdOptions` class to learn what
578
+ /// arguments are supported.
579
+ ///
580
+ /// Example:
581
+ /// ```
582
+ /// namespace F = torch::nn::functional;
583
+ /// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
584
+ /// ```
585
+ using ThresholdFuncOptions = ThresholdOptions;
586
+ } // namespace functional
587
+
588
+ // ============================================================================
589
+
590
+ namespace functional {
591
+
592
+ /// Options for `torch::nn::functional::gumbel_softmax`.
593
+ ///
594
+ /// Example:
595
+ /// ```
596
+ /// namespace F = torch::nn::functional;
597
+ /// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1));
598
+ /// ```
599
+ struct TORCH_API GumbelSoftmaxFuncOptions {
600
+ /// non-negative scalar temperature
601
+ TORCH_ARG(double, tau) = 1.0;
602
+
603
+ /// returned samples will be discretized as one-hot vectors,
604
+ /// but will be differentiated as if they were the soft samples in autograd.
605
+ /// Default: False
606
+ TORCH_ARG(bool, hard) = false;
607
+
608
+ /// dimension along which softmax will be computed. Default: -1
609
+ TORCH_ARG(int, dim) = -1;
610
+ };
611
+
612
+ } // namespace functional
613
+
614
+ // ============================================================================
615
+
616
+ /// Options for the `MultiheadAttention` module.
617
+ ///
618
+ /// Example:
619
+ /// ```
620
+ /// MultiheadAttention model(MultiheadAttentionOptions(20, 10).bias(false));
621
+ /// ```
622
+ struct TORCH_API MultiheadAttentionOptions {
623
+ MultiheadAttentionOptions(int64_t embed_dim, int64_t num_heads);
624
+
625
+ /// total dimension of the model.
626
+ TORCH_ARG(int64_t, embed_dim);
627
+
628
+ /// parallel attention heads.
629
+ TORCH_ARG(int64_t, num_heads);
630
+
631
+ /// a Dropout layer on attn_output_weights. Default: 0.0.
632
+ TORCH_ARG(double, dropout) = 0.0;
633
+
634
+ /// add bias as module parameter. Default: true.
635
+ TORCH_ARG(bool, bias) = true;
636
+
637
+ /// add bias to the key and value sequences at dim=0.
638
+ TORCH_ARG(bool, add_bias_kv) = false;
639
+
640
+ /// add a new batch of zeros to the key and value sequences at dim=1.
641
+ TORCH_ARG(bool, add_zero_attn) = false;
642
+
643
+ /// total number of features in key. Default: c10::nullopt.
644
+ TORCH_ARG(int64_t, kdim);
645
+
646
+ /// total number of features in value. Default: c10::nullopt.
647
+ TORCH_ARG(int64_t, vdim);
648
+ };
649
+
650
+ // ============================================================================
651
+
652
+ namespace functional {
653
+
654
+ /// Options for `torch::nn::functional::multi_head_attention_forward`
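+ ///
+ /// A minimal usage sketch (the projection weight/bias tensors below are
+ /// hypothetical placeholders for an 8-dim, 2-head model and are not defined
+ /// in this header):
+ /// ```
+ /// namespace F = torch::nn::functional;
+ /// auto opts = F::MultiheadAttentionForwardFuncOptions(
+ ///     /*embed_dim_to_check=*/8, /*num_heads=*/2,
+ ///     in_proj_weight, in_proj_bias, bias_k, bias_v,
+ ///     /*add_zero_attn=*/false, /*dropout_p=*/0.1,
+ ///     out_proj_weight, out_proj_bias);
+ /// ```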
655
+ struct TORCH_API MultiheadAttentionForwardFuncOptions {
656
+ MultiheadAttentionForwardFuncOptions(
657
+ int64_t embed_dim_to_check,
658
+ int64_t num_heads,
659
+ Tensor in_proj_weight,
660
+ Tensor in_proj_bias,
661
+ Tensor bias_k,
662
+ Tensor bias_v,
663
+ bool add_zero_attn,
664
+ double dropout_p,
665
+ Tensor out_proj_weight,
666
+ Tensor out_proj_bias);
667
+
668
+ TORCH_ARG(int64_t, embed_dim_to_check);
669
+
670
+ TORCH_ARG(int64_t, num_heads);
671
+
672
+ TORCH_ARG(Tensor, in_proj_weight);
673
+
674
+ TORCH_ARG(Tensor, in_proj_bias);
675
+
676
+ TORCH_ARG(Tensor, bias_k);
677
+
678
+ TORCH_ARG(Tensor, bias_v);
679
+
680
+ TORCH_ARG(bool, add_zero_attn);
681
+
682
+ TORCH_ARG(double, dropout_p);
683
+
684
+ TORCH_ARG(Tensor, out_proj_weight);
685
+
686
+ TORCH_ARG(Tensor, out_proj_bias);
687
+
688
+ TORCH_ARG(bool, training) = true;
689
+
690
+ TORCH_ARG(Tensor, key_padding_mask) = {};
691
+
692
+ TORCH_ARG(bool, need_weights) = true;
693
+
694
+ TORCH_ARG(Tensor, attn_mask) = {};
695
+
696
+ TORCH_ARG(bool, use_separate_proj_weight) = false;
697
+
698
+ TORCH_ARG(Tensor, q_proj_weight) = {};
699
+
700
+ TORCH_ARG(Tensor, k_proj_weight) = {};
701
+
702
+ TORCH_ARG(Tensor, v_proj_weight) = {};
703
+
704
+ TORCH_ARG(Tensor, static_k) = {};
705
+
706
+ TORCH_ARG(Tensor, static_v) = {};
707
+
708
+ TORCH_ARG(bool, average_attn_weights) = true;
709
+ };
710
+
711
+ } // namespace functional
712
+
713
+ } // namespace nn
714
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h ADDED
@@ -0,0 +1,41 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/types.h>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+
10
+ /// Options for the `AdaptiveLogSoftmaxWithLoss` module.
11
+ ///
12
+ /// Example:
13
+ /// ```
14
+ /// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10,
15
+ /// {4, 8}).div_value(2.).head_bias(true));
16
+ /// ```
17
+ struct TORCH_API AdaptiveLogSoftmaxWithLossOptions {
18
+ /* implicit */ AdaptiveLogSoftmaxWithLossOptions(
19
+ int64_t in_features,
20
+ int64_t n_classes,
21
+ std::vector<int64_t> cutoffs);
22
+
23
+ /// Number of features in the input tensor
24
+ TORCH_ARG(int64_t, in_features);
25
+
26
+ /// Number of classes in the dataset
27
+ TORCH_ARG(int64_t, n_classes);
28
+
29
+ /// Cutoffs used to assign targets to their buckets
30
+ TORCH_ARG(std::vector<int64_t>, cutoffs);
31
+
32
+ /// value used as an exponent to compute sizes of the clusters. Default: 4.0
33
+ TORCH_ARG(double, div_value) = 4.;
34
+
35
+ /// If ``true``, adds a bias term to the 'head' of
36
+ /// the adaptive softmax. Default: false
37
+ TORCH_ARG(bool, head_bias) = false;
38
+ };
39
+
40
+ } // namespace nn
41
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h ADDED
@@ -0,0 +1,95 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/types.h>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+
10
+ /// Options for the `BatchNorm` module.
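+ ///
+ /// These options are shared by `BatchNorm1d/2d/3d` (see the aliases below).
+ /// A minimal usage sketch, mirroring those examples:
+ /// ```
+ /// BatchNorm1d model(BatchNormOptions(4).eps(1e-5).momentum(0.1));
+ /// ```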
11
+ struct TORCH_API BatchNormOptions {
12
+ /* implicit */ BatchNormOptions(int64_t num_features);
13
+
14
+ /// The number of features of the input tensor.
15
+ /// Changing this parameter after construction __has no effect__.
16
+ TORCH_ARG(int64_t, num_features);
17
+
18
+ /// The epsilon value added for numerical stability.
19
+ /// Changing this parameter after construction __is effective__.
20
+ TORCH_ARG(double, eps) = 1e-5;
21
+
22
+ /// A momentum multiplier for the mean and variance.
23
+ /// Changing this parameter after construction __is effective__.
24
+ TORCH_ARG(c10::optional<double>, momentum) = 0.1;
25
+
26
+ /// Whether to learn a scale and bias that are applied in an affine
27
+ /// transformation on the input.
28
+ /// Changing this parameter after construction __has no effect__.
29
+ TORCH_ARG(bool, affine) = true;
30
+
31
+ /// Whether to store and update batch statistics (mean and variance) in the
32
+ /// module.
33
+ /// Changing this parameter after construction __has no effect__.
34
+ TORCH_ARG(bool, track_running_stats) = true;
35
+ };
36
+
37
+ /// Options for the `BatchNorm1d` module.
38
+ ///
39
+ /// Example:
40
+ /// ```
41
+ /// BatchNorm1d
42
+ /// model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
43
+ /// ```
44
+ using BatchNorm1dOptions = BatchNormOptions;
45
+
46
+ /// Options for the `BatchNorm2d` module.
47
+ ///
48
+ /// Example:
49
+ /// ```
50
+ /// BatchNorm2d
51
+ /// model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
52
+ /// ```
53
+ using BatchNorm2dOptions = BatchNormOptions;
54
+
55
+ /// Options for the `BatchNorm3d` module.
56
+ ///
57
+ /// Example:
58
+ /// ```
59
+ /// BatchNorm3d
60
+ /// model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
61
+ /// ```
62
+ using BatchNorm3dOptions = BatchNormOptions;
63
+
64
+ // ============================================================================
65
+
66
+ namespace functional {
67
+
68
+ /// Options for `torch::nn::functional::batch_norm`.
69
+ ///
70
+ /// Example:
71
+ /// ```
72
+ /// namespace F = torch::nn::functional;
73
+ /// F::batch_norm(input, mean, variance,
74
+ /// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false));
75
+ /// ```
76
+ struct TORCH_API BatchNormFuncOptions {
77
+ TORCH_ARG(Tensor, weight) = Tensor();
78
+
79
+ TORCH_ARG(Tensor, bias) = Tensor();
80
+
81
+ TORCH_ARG(bool, training) = false;
82
+
83
+ /// A momentum multiplier for the mean and variance.
84
+ /// Changing this parameter after construction __is effective__.
85
+ TORCH_ARG(c10::optional<double>, momentum) = 0.1;
86
+
87
+ /// The epsilon value added for numerical stability.
88
+ /// Changing this parameter after construction __is effective__.
89
+ TORCH_ARG(double, eps) = 1e-5;
90
+ };
91
+
92
+ } // namespace functional
93
+
94
+ } // namespace nn
95
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/conv.h ADDED
@@ -0,0 +1,415 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/expanding_array.h>
7
+ #include <torch/types.h>
8
+
9
+ namespace torch {
10
+ namespace nn {
11
+
12
+ namespace detail {
13
+
14
+ typedef std::variant<
15
+ enumtype::kZeros,
16
+ enumtype::kReflect,
17
+ enumtype::kReplicate,
18
+ enumtype::kCircular>
19
+ conv_padding_mode_t;
20
+
21
+ template <size_t D>
22
+ using conv_padding_t =
23
+ std::variant<ExpandingArray<D>, enumtype::kValid, enumtype::kSame>;
24
+
25
+ /// Options for a `D`-dimensional convolution or convolution transpose module.
26
+ template <size_t D>
27
+ struct ConvNdOptions {
28
+ using padding_t = conv_padding_t<D>;
29
+ ConvNdOptions(
30
+ int64_t in_channels,
31
+ int64_t out_channels,
32
+ ExpandingArray<D> kernel_size)
33
+ : in_channels_(in_channels),
34
+ out_channels_(out_channels),
35
+ kernel_size_(std::move(kernel_size)) {}
36
+
37
+ /// The number of channels the input volumes will have.
38
+ /// Changing this parameter after construction __has no effect__.
39
+ TORCH_ARG(int64_t, in_channels);
40
+
41
+ /// The number of output channels the convolution should produce.
42
+ /// Changing this parameter after construction __has no effect__.
43
+ TORCH_ARG(int64_t, out_channels);
44
+
45
+ /// The kernel size to use.
46
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
47
+ /// numbers.
48
+ /// This parameter __can__ be changed after construction.
49
+ TORCH_ARG(ExpandingArray<D>, kernel_size);
50
+
51
+ /// The stride of the convolution.
52
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
53
+ /// numbers.
54
+ /// This parameter __can__ be changed after construction.
55
+ TORCH_ARG(ExpandingArray<D>, stride) = 1;
56
+
57
+ /// The padding to add to the input volumes.
58
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
59
+ /// numbers.
60
+ /// This parameter __can__ be changed after construction.
61
+ TORCH_ARG(padding_t, padding) = 0;
62
+
63
+ public:
64
+ decltype(auto) padding(std::initializer_list<int64_t> il) {
65
+ return padding(IntArrayRef{il});
66
+ }
67
+
68
+ /// The kernel dilation.
69
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
70
+ /// numbers.
71
+ /// This parameter __can__ be changed after construction.
72
+ TORCH_ARG(ExpandingArray<D>, dilation) = 1;
73
+
74
+ /// If true, convolutions will be transpose convolutions (a.k.a.
75
+ /// deconvolutions).
76
+ /// Changing this parameter after construction __has no effect__.
77
+ TORCH_ARG(bool, transposed) = false;
78
+
79
+ /// For transpose convolutions, the padding to add to output volumes.
80
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
81
+ /// numbers.
82
+ /// This parameter __can__ be changed after construction.
83
+ TORCH_ARG(ExpandingArray<D>, output_padding) = 0;
84
+
85
+ /// The number of convolution groups.
86
+ /// This parameter __can__ be changed after construction.
87
+ TORCH_ARG(int64_t, groups) = 1;
88
+
89
+ /// Whether to add a bias after individual applications of the kernel.
90
+ /// Changing this parameter after construction __has no effect__.
91
+ TORCH_ARG(bool, bias) = true;
92
+
93
+ /// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or
94
+ /// `torch::kCircular`. Default: `torch::kZeros`
95
+ TORCH_ARG(conv_padding_mode_t, padding_mode) = torch::kZeros;
96
+ };
97
+
98
+ } // namespace detail
99
+
100
+ // ============================================================================
101
+
102
+ /// Options for a `D`-dimensional convolution module.
103
+ template <size_t D>
104
+ struct ConvOptions {
105
+ using padding_mode_t = detail::conv_padding_mode_t;
106
+ using padding_t = detail::conv_padding_t<D>;
107
+
108
+ ConvOptions(
109
+ int64_t in_channels,
110
+ int64_t out_channels,
111
+ ExpandingArray<D> kernel_size)
112
+ : in_channels_(in_channels),
113
+ out_channels_(out_channels),
114
+ kernel_size_(std::move(kernel_size)) {}
115
+
116
+ /// The number of channels the input volumes will have.
117
+ /// Changing this parameter after construction __has no effect__.
118
+ TORCH_ARG(int64_t, in_channels);
119
+
120
+ /// The number of output channels the convolution should produce.
121
+ /// Changing this parameter after construction __has no effect__.
122
+ TORCH_ARG(int64_t, out_channels);
123
+
124
+ /// The kernel size to use.
125
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
126
+ /// numbers.
127
+ /// This parameter __can__ be changed after construction.
128
+ TORCH_ARG(ExpandingArray<D>, kernel_size);
129
+
130
+ /// The stride of the convolution.
131
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
132
+ /// numbers.
133
+ /// This parameter __can__ be changed after construction.
134
+ TORCH_ARG(ExpandingArray<D>, stride) = 1;
135
+
136
+ /// The padding to add to the input volumes.
137
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
138
+ /// numbers.
139
+ /// This parameter __can__ be changed after construction.
140
+ TORCH_ARG(padding_t, padding) = 0;
141
+
142
+ public:
143
+ decltype(auto) padding(std::initializer_list<int64_t> il) {
144
+ return padding(IntArrayRef{il});
145
+ }
146
+
147
+ /// The kernel dilation.
148
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
149
+ /// numbers.
150
+ /// This parameter __can__ be changed after construction.
151
+ TORCH_ARG(ExpandingArray<D>, dilation) = 1;
152
+
153
+ /// The number of convolution groups.
154
+ /// This parameter __can__ be changed after construction.
155
+ TORCH_ARG(int64_t, groups) = 1;
156
+
157
+ /// Whether to add a bias after individual applications of the kernel.
158
+ /// Changing this parameter after construction __has no effect__.
159
+ TORCH_ARG(bool, bias) = true;
160
+
161
+ /// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or
162
+ /// `torch::kCircular`. Default: `torch::kZeros`
163
+ TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros;
164
+ };
165
+
166
+ /// `ConvOptions` specialized for the `Conv1d` module.
167
+ ///
168
+ /// Example:
169
+ /// ```
170
+ /// Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false));
171
+ /// ```
172
+ using Conv1dOptions = ConvOptions<1>;
173
+
174
+ /// `ConvOptions` specialized for the `Conv2d` module.
175
+ ///
176
+ /// Example:
177
+ /// ```
178
+ /// Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false));
179
+ /// ```
180
+ using Conv2dOptions = ConvOptions<2>;
181
+
182
+ /// `ConvOptions` specialized for the `Conv3d` module.
183
+ ///
184
+ /// Example:
185
+ /// ```
186
+ /// Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false));
187
+ /// ```
188
+ using Conv3dOptions = ConvOptions<3>;
189
+
190
+ // ============================================================================
191
+
192
+ namespace functional {
193
+
194
+ /// Options for a `D`-dimensional convolution functional.
195
+ template <size_t D>
196
+ struct ConvFuncOptions {
197
+ using padding_t = torch::nn::detail::conv_padding_t<D>;
198
+
199
+ /// optional bias of shape `(out_channels)`. Default: ``None``
200
+ TORCH_ARG(torch::Tensor, bias) = Tensor();
201
+
202
+ /// The stride of the convolving kernel.
203
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
204
+ /// numbers.
205
+ TORCH_ARG(ExpandingArray<D>, stride) = 1;
206
+
207
+ /// Implicit paddings on both sides of the input.
208
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
209
+ /// numbers.
210
+ TORCH_ARG(padding_t, padding) = 0;
211
+
212
+ public:
213
+ decltype(auto) padding(std::initializer_list<int64_t> il) {
214
+ return padding(IntArrayRef{il});
215
+ }
216
+
217
+ /// The spacing between kernel elements.
218
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
219
+ /// numbers.
220
+ TORCH_ARG(ExpandingArray<D>, dilation) = 1;
221
+
222
+ /// Split input into groups, `in_channels` should be divisible by
223
+ /// the number of groups.
224
+ TORCH_ARG(int64_t, groups) = 1;
225
+ };
226
+
227
+ /// `ConvFuncOptions` specialized for `torch::nn::functional::conv1d`.
228
+ ///
229
+ /// Example:
230
+ /// ```
231
+ /// namespace F = torch::nn::functional;
232
+ /// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
233
+ /// ```
234
+ using Conv1dFuncOptions = ConvFuncOptions<1>;
235
+
236
+ /// `ConvFuncOptions` specialized for `torch::nn::functional::conv2d`.
237
+ ///
238
+ /// Example:
239
+ /// ```
240
+ /// namespace F = torch::nn::functional;
241
+ /// F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1));
242
+ /// ```
243
+ using Conv2dFuncOptions = ConvFuncOptions<2>;
244
+
245
+ /// `ConvFuncOptions` specialized for `torch::nn::functional::conv3d`.
246
+ ///
247
+ /// Example:
248
+ /// ```
249
+ /// namespace F = torch::nn::functional;
250
+ /// F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1));
251
+ /// ```
252
+ using Conv3dFuncOptions = ConvFuncOptions<3>;
253
+
254
+ } // namespace functional
255
+
256
+ // ============================================================================
257
+
258
+ template <size_t D>
259
+ struct ConvTransposeOptions {
260
+ using padding_mode_t = detail::conv_padding_mode_t;
261
+
262
+ ConvTransposeOptions(
263
+ int64_t in_channels,
264
+ int64_t out_channels,
265
+ ExpandingArray<D> kernel_size)
266
+ : in_channels_(in_channels),
267
+ out_channels_(out_channels),
268
+ kernel_size_(std::move(kernel_size)) {}
269
+
270
+ /// The number of channels the input volumes will have.
271
+ /// Changing this parameter after construction __has no effect__.
272
+ TORCH_ARG(int64_t, in_channels);
273
+
274
+ /// The number of output channels the convolution should produce.
275
+ /// Changing this parameter after construction __has no effect__.
276
+ TORCH_ARG(int64_t, out_channels);
277
+
278
+ /// The kernel size to use.
279
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
280
+ /// numbers.
281
+ /// This parameter __can__ be changed after construction.
282
+ TORCH_ARG(ExpandingArray<D>, kernel_size);
283
+
284
+ /// The stride of the convolution.
285
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
286
+ /// numbers.
287
+ /// This parameter __can__ be changed after construction.
288
+ TORCH_ARG(ExpandingArray<D>, stride) = 1;
289
+
290
+ /// The padding to add to the input volumes.
291
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
292
+ /// numbers.
293
+ /// This parameter __can__ be changed after construction.
294
+ TORCH_ARG(ExpandingArray<D>, padding) = 0;
295
+
296
+ /// For transpose convolutions, the padding to add to output volumes.
297
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
298
+ /// numbers.
299
+ /// This parameter __can__ be changed after construction.
300
+ TORCH_ARG(ExpandingArray<D>, output_padding) = 0;
301
+
302
+ /// The number of convolution groups.
303
+ /// This parameter __can__ be changed after construction.
304
+ TORCH_ARG(int64_t, groups) = 1;
305
+
306
+ /// Whether to add a bias after individual applications of the kernel.
307
+ /// Changing this parameter after construction __has no effect__.
308
+ TORCH_ARG(bool, bias) = true;
309
+
310
+ /// The kernel dilation.
311
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
312
+ /// numbers.
313
+ /// This parameter __can__ be changed after construction.
314
+ TORCH_ARG(ExpandingArray<D>, dilation) = 1;
315
+
316
+ /// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or
317
+ /// `torch::kCircular`. Default: `torch::kZeros`
318
+ TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros;
319
+ };
320
+
321
+ /// `ConvTransposeOptions` specialized for the `ConvTranspose1d` module.
322
+ ///
323
+ /// Example:
324
+ /// ```
325
+ /// ConvTranspose1d model(ConvTranspose1dOptions(3, 2,
326
+ /// 3).stride(1).bias(false));
327
+ /// ```
328
+ using ConvTranspose1dOptions = ConvTransposeOptions<1>;
329
+
330
+ /// `ConvTransposeOptions` specialized for the `ConvTranspose2d` module.
331
+ ///
332
+ /// Example:
333
+ /// ```
334
+ /// ConvTranspose2d model(ConvTranspose2dOptions(3, 2,
335
+ /// 3).stride(1).bias(false));
336
+ /// ```
337
+ using ConvTranspose2dOptions = ConvTransposeOptions<2>;
338
+
339
+ /// `ConvTransposeOptions` specialized for the `ConvTranspose3d` module.
340
+ ///
341
+ /// Example:
342
+ /// ```
343
+ /// ConvTranspose3d model(ConvTranspose3dOptions(2, 2,
344
+ /// 2).stride(1).bias(false));
345
+ /// ```
346
+ using ConvTranspose3dOptions = ConvTransposeOptions<3>;
347
+
348
+ // ============================================================================
349
+
350
+ namespace functional {
351
+
352
+ /// Options for a `D`-dimensional convolution functional.
353
+ template <size_t D>
354
+ struct ConvTransposeFuncOptions {
355
+ /// optional bias of shape `(out_channels)`. Default: ``None``
356
+ TORCH_ARG(torch::Tensor, bias) = Tensor();
357
+
358
+ /// The stride of the convolving kernel.
359
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
360
+ /// numbers.
361
+ TORCH_ARG(ExpandingArray<D>, stride) = 1;
362
+
363
+ /// Implicit paddings on both sides of the input.
364
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
365
+ /// numbers.
366
+ TORCH_ARG(ExpandingArray<D>, padding) = 0;
367
+
368
+ /// Additional size added to one side of each dimension in the output shape.
369
+ /// Default: 0
370
+ TORCH_ARG(ExpandingArray<D>, output_padding) = 0;
371
+
372
+ /// Split input into groups, `in_channels` should be divisible by
373
+ /// the number of groups.
374
+ TORCH_ARG(int64_t, groups) = 1;
375
+
376
+ /// The spacing between kernel elements.
377
+ /// For a `D`-dim convolution, must be a single number or a list of `D`
378
+ /// numbers.
379
+ TORCH_ARG(ExpandingArray<D>, dilation) = 1;
380
+ };
381
+
382
+ /// `ConvTransposeFuncOptions` specialized for
383
+ /// `torch::nn::functional::conv_transpose1d`.
384
+ ///
385
+ /// Example:
386
+ /// ```
387
+ /// namespace F = torch::nn::functional;
388
+ /// F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1));
389
+ /// ```
390
+ using ConvTranspose1dFuncOptions = ConvTransposeFuncOptions<1>;
391
+
392
+ /// `ConvTransposeFuncOptions` specialized for
393
+ /// `torch::nn::functional::conv_transpose2d`.
394
+ ///
395
+ /// Example:
396
+ /// ```
397
+ /// namespace F = torch::nn::functional;
398
+ /// F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1));
399
+ /// ```
400
+ using ConvTranspose2dFuncOptions = ConvTransposeFuncOptions<2>;
401
+
402
+ /// `ConvTransposeFuncOptions` specialized for
403
+ /// `torch::nn::functional::conv_transpose3d`.
404
+ ///
405
+ /// Example:
406
+ /// ```
407
+ /// namespace F = torch::nn::functional;
408
+ /// F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1));
409
+ /// ```
410
+ using ConvTranspose3dFuncOptions = ConvTransposeFuncOptions<3>;
411
+
412
+ } // namespace functional
413
+
414
+ } // namespace nn
415
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h ADDED
@@ -0,0 +1,71 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/types.h>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+
10
+ /// Options for the `CosineSimilarity` module.
11
+ ///
12
+ /// Example:
13
+ /// ```
14
+ /// CosineSimilarity model(CosineSimilarityOptions().dim(0).eps(0.5));
15
+ /// ```
16
+ struct TORCH_API CosineSimilarityOptions {
17
+ /// Dimension where cosine similarity is computed. Default: 1
18
+ TORCH_ARG(int64_t, dim) = 1;
19
+ /// Small value to avoid division by zero. Default: 1e-8
20
+ TORCH_ARG(double, eps) = 1e-8;
21
+ };
22
+
23
+ namespace functional {
24
+ /// Options for `torch::nn::functional::cosine_similarity`.
25
+ ///
26
+ /// See the documentation for `torch::nn::CosineSimilarityOptions` class to
27
+ /// learn what arguments are supported.
28
+ ///
29
+ /// Example:
30
+ /// ```
31
+ /// namespace F = torch::nn::functional;
32
+ /// F::cosine_similarity(input1, input2,
33
+ /// F::CosineSimilarityFuncOptions().dim(1));
34
+ /// ```
35
+ using CosineSimilarityFuncOptions = CosineSimilarityOptions;
36
+ } // namespace functional
37
+
38
+ // ============================================================================
39
+
40
+ /// Options for the `PairwiseDistance` module.
41
+ ///
42
+ /// Example:
43
+ /// ```
44
+ /// PairwiseDistance
45
+ /// model(PairwiseDistanceOptions().p(3).eps(0.5).keepdim(true));
46
+ /// ```
47
+ struct TORCH_API PairwiseDistanceOptions {
48
+ /// The norm degree. Default: 2
49
+ TORCH_ARG(double, p) = 2.0;
50
+ /// Small value to avoid division by zero. Default: 1e-6
51
+ TORCH_ARG(double, eps) = 1e-6;
52
+ /// Determines whether or not to keep the vector dimension. Default: false
53
+ TORCH_ARG(bool, keepdim) = false;
54
+ };
55
+
56
+ namespace functional {
57
+ /// Options for `torch::nn::functional::pairwise_distance`.
58
+ ///
59
+ /// See the documentation for `torch::nn::PairwiseDistanceOptions` class to
60
+ /// learn what arguments are supported.
61
+ ///
62
+ /// Example:
63
+ /// ```
64
+ /// namespace F = torch::nn::functional;
65
+ /// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1));
66
+ /// ```
67
+ using PairwiseDistanceFuncOptions = PairwiseDistanceOptions;
68
+ } // namespace functional
69
+
70
+ } // namespace nn
71
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h ADDED
@@ -0,0 +1,130 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/types.h>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+
10
+ /// Options for the `Dropout` module.
11
+ ///
12
+ /// Example:
13
+ /// ```
14
+ /// Dropout model(DropoutOptions().p(0.42).inplace(true));
15
+ /// ```
16
+ struct TORCH_API DropoutOptions {
17
+ /* implicit */ DropoutOptions(double p = 0.5);
18
+
19
+ /// The probability of an element to be zeroed. Default: 0.5
20
+ TORCH_ARG(double, p) = 0.5;
21
+
22
+ /// can optionally do the operation in-place. Default: False
23
+ TORCH_ARG(bool, inplace) = false;
24
+ };
25
+
26
+ /// Options for the `Dropout2d` module.
27
+ ///
28
+ /// Example:
29
+ /// ```
30
+ /// Dropout2d model(Dropout2dOptions().p(0.42).inplace(true));
31
+ /// ```
32
+ using Dropout2dOptions = DropoutOptions;
33
+
34
+ /// Options for the `Dropout3d` module.
35
+ ///
36
+ /// Example:
37
+ /// ```
38
+ /// Dropout3d model(Dropout3dOptions().p(0.42).inplace(true));
39
+ /// ```
40
+ using Dropout3dOptions = DropoutOptions;
41
+
42
+ /// Options for the `AlphaDropout` module.
43
+ ///
44
+ /// Example:
45
+ /// ```
46
+ /// AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true));
47
+ /// ```
48
+ using AlphaDropoutOptions = DropoutOptions;
49
+
50
+ /// Options for the `FeatureAlphaDropout` module.
51
+ ///
52
+ /// Example:
53
+ /// ```
54
+ /// FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true));
55
+ /// ```
56
+ using FeatureAlphaDropoutOptions = DropoutOptions;
57
+
58
+ namespace functional {
59
+
60
+ /// Options for `torch::nn::functional::dropout`.
61
+ ///
62
+ /// Example:
63
+ /// ```
64
+ /// namespace F = torch::nn::functional;
65
+ /// F::dropout(input, F::DropoutFuncOptions().p(0.5));
66
+ /// ```
67
+ struct TORCH_API DropoutFuncOptions {
68
+ /// The probability of an element to be zeroed. Default: 0.5
69
+ TORCH_ARG(double, p) = 0.5;
70
+
71
+ TORCH_ARG(bool, training) = true;
72
+
73
+ /// can optionally do the operation in-place. Default: False
74
+ TORCH_ARG(bool, inplace) = false;
75
+ };
76
+
77
+ /// Options for `torch::nn::functional::dropout2d`.
78
+ ///
79
+ /// Example:
80
+ /// ```
81
+ /// namespace F = torch::nn::functional;
82
+ /// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5));
83
+ /// ```
84
+ using Dropout2dFuncOptions = DropoutFuncOptions;
85
+
86
+ /// Options for `torch::nn::functional::dropout3d`.
87
+ ///
88
+ /// Example:
89
+ /// ```
90
+ /// namespace F = torch::nn::functional;
91
+ /// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5));
92
+ /// ```
93
+ using Dropout3dFuncOptions = DropoutFuncOptions;
94
+
95
+ /// Options for `torch::nn::functional::alpha_dropout`.
96
+ ///
97
+ /// Example:
98
+ /// ```
99
+ /// namespace F = torch::nn::functional;
100
+ /// F::alpha_dropout(input,
101
+ /// F::AlphaDropoutFuncOptions().p(0.5).training(false));
102
+ /// ```
103
+ struct TORCH_API AlphaDropoutFuncOptions {
104
+ TORCH_ARG(double, p) = 0.5;
105
+
106
+ TORCH_ARG(bool, training) = false;
107
+
108
+ TORCH_ARG(bool, inplace) = false;
109
+ };
110
+
111
+ /// Options for `torch::nn::functional::feature_alpha_dropout`.
112
+ ///
113
+ /// Example:
114
+ /// ```
115
+ /// namespace F = torch::nn::functional;
116
+ /// F::feature_alpha_dropout(input,
117
+ /// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false));
118
+ /// ```
119
+ struct TORCH_API FeatureAlphaDropoutFuncOptions {
120
+ TORCH_ARG(double, p) = 0.5;
121
+
122
+ TORCH_ARG(bool, training) = false;
123
+
124
+ TORCH_ARG(bool, inplace) = false;
125
+ };
126
+
127
+ } // namespace functional
128
+
129
+ } // namespace nn
130
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h ADDED
@@ -0,0 +1,242 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ /// Options for the `Embedding` module.
12
+ ///
13
+ /// Example:
14
+ /// ```
15
+ /// Embedding model(EmbeddingOptions(10,
16
+ /// 2).padding_idx(3).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true));
17
+ /// ```
18
+ struct TORCH_API EmbeddingOptions {
19
+ EmbeddingOptions(int64_t num_embeddings, int64_t embedding_dim);
20
+
21
+ /// The size of the dictionary of embeddings.
22
+ TORCH_ARG(int64_t, num_embeddings);
23
+ /// The size of each embedding vector.
24
+ TORCH_ARG(int64_t, embedding_dim);
25
+ /// If specified, the entries at `padding_idx` do not contribute to the
26
+ /// gradient; therefore, the embedding vector at `padding_idx` is not updated
27
+ /// during training, i.e. it remains as a fixed "pad". For a newly constructed
28
+ /// Embedding, the embedding vector at `padding_idx` will default to all
29
+ /// zeros, but can be updated to another value to be used as the padding
30
+ /// vector.
31
+ TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
32
+ /// If given, each embedding vector with norm larger than `max_norm` is
33
+ /// renormalized to have norm `max_norm`.
34
+ TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
35
+ /// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
36
+ TORCH_ARG(double, norm_type) = 2.;
37
+ /// If given, this will scale gradients by the inverse of frequency of the
38
+ /// words in the mini-batch. Default ``false``.
39
+ TORCH_ARG(bool, scale_grad_by_freq) = false;
40
+ /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
41
+ TORCH_ARG(bool, sparse) = false;
42
+ /// The learnable weights of the module of shape (num_embeddings,
43
+ /// embedding_dim)
44
+ TORCH_ARG(torch::Tensor, _weight) = Tensor();
45
+ };
46
+
47
+ // ============================================================================
48
+
49
+ /// Options for the `Embedding::from_pretrained` function.
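+ ///
+ /// A minimal usage sketch (assuming `Embedding::from_pretrained` accepts
+ /// these options; `weight` is a hypothetical pretrained tensor):
+ /// ```
+ /// auto weight = torch::tensor({{1.0, 2.3, 3.0}, {4.0, 5.1, 6.3}});
+ /// Embedding model = Embedding::from_pretrained(weight,
+ ///     EmbeddingFromPretrainedOptions().freeze(true));
+ /// ```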
50
+ struct TORCH_API EmbeddingFromPretrainedOptions {
51
+ /// If ``true``, the tensor does not get updated in the learning process.
52
+ /// Equivalent to ``embedding.weight.requires_grad_(false)``. Default:
53
+ /// ``true``
54
+ TORCH_ARG(bool, freeze) = true;
55
+ /// If specified, the entries at `padding_idx` do not contribute to the
56
+ /// gradient; therefore, the embedding vector at `padding_idx` is not updated
57
+ /// during training, i.e. it remains as a fixed "pad".
58
+ TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
59
+ /// If given, each embedding vector with norm larger than `max_norm` is
60
+ /// renormalized to have norm `max_norm`.
61
+ TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
62
+ /// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
63
+ TORCH_ARG(double, norm_type) = 2.;
64
+ /// If given, this will scale gradients by the inverse of frequency of the
65
+ /// words in the mini-batch. Default ``false``.
66
+ TORCH_ARG(bool, scale_grad_by_freq) = false;
67
+ /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
68
+ TORCH_ARG(bool, sparse) = false;
69
+ };
70
+
71
+ // ============================================================================
72
+
73
+ namespace functional {
74
+
75
+ /// Options for `torch::nn::functional::embedding`.
76
+ ///
77
+ /// Example:
78
+ /// ```
79
+ /// namespace F = torch::nn::functional;
80
+ /// F::embedding(input, weight,
81
+ /// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true));
82
+ /// ```
83
+ struct TORCH_API EmbeddingFuncOptions {
84
+ /// If specified, the entries at `padding_idx` do not contribute to the
85
+ /// gradient; therefore, the embedding vector at `padding_idx` is not updated
86
+ /// during training, i.e. it remains as a fixed "pad".
87
+ TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
88
+ /// If given, each embedding vector with norm larger than `max_norm` is
89
+ /// renormalized to have norm `max_norm`.
90
+ TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
91
+ /// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
92
+ TORCH_ARG(double, norm_type) = 2.;
93
+ /// If given, this will scale gradients by the inverse of frequency of the
94
+ /// words in the mini-batch. Default ``false``.
95
+ TORCH_ARG(bool, scale_grad_by_freq) = false;
96
+ /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
97
+ TORCH_ARG(bool, sparse) = false;
98
+ };
99
+
100
+ } // namespace functional
101
+
102
+ // ============================================================================
103
+
104
+ typedef std::variant<enumtype::kSum, enumtype::kMean, enumtype::kMax>
105
+ EmbeddingBagMode;
106
+
107
+ /// Options for the `EmbeddingBag` module.
108
+ ///
109
+ /// Example:
110
+ /// ```
111
+ /// EmbeddingBag model(EmbeddingBagOptions(10,
112
+ /// 2).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true).mode(torch::kSum));
113
+ /// ```
114
+ struct TORCH_API EmbeddingBagOptions {
115
+ EmbeddingBagOptions(int64_t num_embeddings, int64_t embedding_dim);
116
+
117
+ /// The size of the dictionary of embeddings.
118
+ TORCH_ARG(int64_t, num_embeddings);
119
+ /// The size of each embedding vector.
120
+ TORCH_ARG(int64_t, embedding_dim);
121
+ /// If given, each embedding vector with norm larger than `max_norm` is
122
+ /// renormalized to have norm `max_norm`.
123
+ TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
124
+ /// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
125
+ TORCH_ARG(double, norm_type) = 2.;
126
+ /// If given, this will scale gradients by the inverse of frequency of the
127
+ /// words in the mini-batch. Default ``false``. Note: this option is not
128
+ /// supported when ``mode="kMax"``.
129
+ TORCH_ARG(bool, scale_grad_by_freq) = false;
130
+ /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the
131
+ /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights`
132
+ /// into consideration. ``"kMean"`` computes the average of the values in the
133
+ /// bag, ``"kMax"`` computes the max value over each bag.
134
+ TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean;
135
+ /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
136
+ /// Note: this option is not supported when ``mode="kMax"``.
137
+ TORCH_ARG(bool, sparse) = false;
138
+ /// The learnable weights of the module of shape (num_embeddings,
139
+ /// embedding_dim)
140
+ TORCH_ARG(torch::Tensor, _weight) = Tensor();
141
+ /// If ``true``, `offsets` has one additional element, where the last element
142
+ /// is equivalent to the size of `indices`. This matches the CSR format.
143
+ TORCH_ARG(bool, include_last_offset) = false;
144
+ /// If specified, the entries at `padding_idx` do not contribute to the
145
+ /// gradient; therefore, the embedding vector at padding_idx is not updated
146
+ /// during training, i.e. it remains as a fixed "pad". For a newly constructed
147
+ /// EmbeddingBag, the embedding vector at `padding_idx` will default to all
148
+ /// zeros, but can be updated to another value to be used as the padding
149
+ /// vector. Note that the embedding vector at `padding_idx` is excluded from
150
+ /// the reduction.
151
+ TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
152
+ };
153
+
154
+ // ============================================================================
155
+
156
+ /// Options for the `EmbeddingBag::from_pretrained` function.
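+ ///
+ /// A minimal usage sketch (assuming `EmbeddingBag::from_pretrained` accepts
+ /// these options; `weight` is a hypothetical pretrained tensor):
+ /// ```
+ /// auto weight = torch::tensor({{1.0, 2.3, 3.0}, {4.0, 5.1, 6.3}});
+ /// EmbeddingBag model = EmbeddingBag::from_pretrained(weight,
+ ///     EmbeddingBagFromPretrainedOptions().freeze(true));
+ /// ```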
157
+ struct TORCH_API EmbeddingBagFromPretrainedOptions {
158
+ /// If ``true``, the tensor does not get updated in the learning process.
159
+ /// Equivalent to ``embeddingbag.weight.requires_grad_(false)``. Default:
160
+ /// ``true``
161
+ TORCH_ARG(bool, freeze) = true;
162
+ /// If given, each embedding vector with norm larger than `max_norm` is
163
+ /// renormalized to have norm `max_norm`.
164
+ TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
165
+ /// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
166
+ TORCH_ARG(double, norm_type) = 2.;
167
+ /// If given, this will scale gradients by the inverse of frequency of the
168
+ /// words in the mini-batch. Default ``false``. Note: this option is not
169
+ /// supported when ``mode="kMax"``.
170
+ TORCH_ARG(bool, scale_grad_by_freq) = false;
171
+ /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the
172
+ /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights`
173
+ /// into consideration. ``"kMean"`` computes the average of the values in the
174
+ /// bag, ``"kMax"`` computes the max value over each bag.
175
+ TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean;
176
+ /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
177
+ /// Note: this option is not supported when ``mode="kMax"``.
178
+ TORCH_ARG(bool, sparse) = false;
179
+ /// If ``true``, `offsets` has one additional element, where the last element
180
+ /// is equivalent to the size of `indices`. This matches the CSR format. Note:
181
+ /// this option is currently only supported when ``mode="kSum"``.
182
+ TORCH_ARG(bool, include_last_offset) = false;
183
+ /// If specified, the entries at `padding_idx` do not contribute to the
184
+ /// gradient; therefore, the embedding vector at padding_idx is not updated
185
+ /// during training, i.e. it remains as a fixed "pad". Note that the embedding
186
+ /// vector at `padding_idx` is excluded from the reduction.
187
+ TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
188
+ };
189
+
190
+ // ============================================================================
191
+
192
+ namespace functional {
193
+
194
+ /// Options for `torch::nn::functional::embedding_bag`.
195
+ ///
196
+ /// Example:
197
+ /// ```
198
+ /// namespace F = torch::nn::functional;
199
+ /// F::embedding_bag(input, weight,
200
+ /// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));
201
+ /// ```
202
+ struct TORCH_API EmbeddingBagFuncOptions {
203
+ /// Only used when `input` is 1D. `offsets` determines
204
+ /// the starting index position of each bag (sequence) in `input`.
205
+ TORCH_ARG(torch::Tensor, offsets) = Tensor();
206
+ /// If given, each embedding vector with norm larger than `max_norm` is
207
+ /// renormalized to have norm `max_norm`.
208
+ TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
209
+ /// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
210
+ TORCH_ARG(double, norm_type) = 2.;
211
+ /// If given, this will scale gradients by the inverse of frequency of the
212
+ /// words in the mini-batch. Default ``false``. Note: this option is not
213
+ /// supported when ``mode="kMax"``.
214
+ TORCH_ARG(bool, scale_grad_by_freq) = false;
215
+ /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the
216
+ /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights`
217
+ /// into consideration. ``"kMean"`` computes the average of the values in the
218
+ /// bag, ``"kMax"`` computes the max value over each bag.
219
+ TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean;
220
+ /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
221
+ /// Note: this option is not supported when ``mode="kMax"``.
222
+ TORCH_ARG(bool, sparse) = false;
223
+ /// a tensor of float / double weights, or None to indicate all weights should
224
+ /// be taken to be 1. If specified, `per_sample_weights` must have exactly the
225
+ /// same shape as input and is treated as having the same `offsets`, if those
226
+ /// are not None.
227
+ TORCH_ARG(torch::Tensor, per_sample_weights) = Tensor();
228
+ /// If ``true``, `offsets` has one additional element, where the last element
229
+ /// is equivalent to the size of `indices`. This matches the CSR format. Note:
230
+ /// this option is currently only supported when ``mode="kSum"``.
231
+ TORCH_ARG(bool, include_last_offset) = false;
232
+ /// If specified, the entries at `padding_idx` do not contribute to the
233
+ /// gradient; therefore, the embedding vector at padding_idx is not updated
234
+ /// during training, i.e. it remains as a fixed "pad". Note that the embedding
235
+ /// vector at `padding_idx` is excluded from the reduction.
236
+ TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
237
+ };
238
+
239
+ } // namespace functional
240
+
241
+ } // namespace nn
242
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h ADDED
@@ -0,0 +1,99 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/expanding_array.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ /// Options for the `Fold` module.
12
+ ///
13
+ /// Example:
14
+ /// ```
15
+ /// Fold model(FoldOptions({8, 8}, {3, 3}).dilation(2).padding({2,
16
+ /// 1}).stride(2));
17
+ /// ```
18
+ struct TORCH_API FoldOptions {
19
+ FoldOptions(ExpandingArray<2> output_size, ExpandingArray<2> kernel_size)
20
+ : output_size_(std::move(output_size)),
21
+ kernel_size_(std::move(kernel_size)) {}
22
+
23
+ /// describes the spatial shape of the large containing tensor of the sliding
24
+ /// local blocks. It is useful to resolve the ambiguity when multiple input
25
+ /// shapes map to the same number of sliding blocks, e.g., with stride > 0.
26
+ TORCH_ARG(ExpandingArray<2>, output_size);
27
+
28
+ /// the size of the sliding blocks
29
+ TORCH_ARG(ExpandingArray<2>, kernel_size);
30
+
31
+ /// controls the spacing between the kernel points; also known as the à trous
32
+ /// algorithm.
33
+ TORCH_ARG(ExpandingArray<2>, dilation) = 1;
34
+
35
+ /// controls the amount of implicit zero-paddings on both sides for padding
36
+ /// number of points for each dimension before reshaping.
37
+ TORCH_ARG(ExpandingArray<2>, padding) = 0;
38
+
39
+ /// controls the stride for the sliding blocks.
40
+ TORCH_ARG(ExpandingArray<2>, stride) = 1;
41
+ };
42
+
43
+ namespace functional {
44
+ /// Options for `torch::nn::functional::fold`.
45
+ ///
46
+ /// See the documentation for `torch::nn::FoldOptions` class to learn what
47
+ /// arguments are supported.
48
+ ///
49
+ /// Example:
50
+ /// ```
51
+ /// namespace F = torch::nn::functional;
52
+ /// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2}));
53
+ /// ```
54
+ using FoldFuncOptions = FoldOptions;
55
+ } // namespace functional
56
+
57
+ // ============================================================================
58
+
59
+ /// Options for the `Unfold` module.
60
+ ///
61
+ /// Example:
62
+ /// ```
63
+ /// Unfold model(UnfoldOptions({2, 4}).dilation(2).padding({2, 1}).stride(2));
64
+ /// ```
65
+ struct TORCH_API UnfoldOptions {
66
+ UnfoldOptions(ExpandingArray<2> kernel_size)
67
+ : kernel_size_(std::move(kernel_size)) {}
68
+
69
+ /// the size of the sliding blocks
70
+ TORCH_ARG(ExpandingArray<2>, kernel_size);
71
+
72
+ /// controls the spacing between the kernel points; also known as the à trous
73
+ /// algorithm.
74
+ TORCH_ARG(ExpandingArray<2>, dilation) = 1;
75
+
76
+ /// controls the amount of implicit zero-paddings on both sides for padding
77
+ /// number of points for each dimension before reshaping.
78
+ TORCH_ARG(ExpandingArray<2>, padding) = 0;
79
+
80
+ /// controls the stride for the sliding blocks.
81
+ TORCH_ARG(ExpandingArray<2>, stride) = 1;
82
+ };
83
+
84
+ namespace functional {
85
+ /// Options for `torch::nn::functional::unfold`.
86
+ ///
87
+ /// See the documentation for `torch::nn::UnfoldOptions` class to learn what
88
+ /// arguments are supported.
89
+ ///
90
+ /// Example:
91
+ /// ```
92
+ /// namespace F = torch::nn::functional;
93
+ /// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2));
94
+ /// ```
95
+ using UnfoldFuncOptions = UnfoldOptions;
96
+ } // namespace functional
97
+
98
+ } // namespace nn
99
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h ADDED
@@ -0,0 +1,89 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/nn/options/batchnorm.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ /// Options for the `InstanceNorm` module.
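+ ///
+ /// These options are shared by `InstanceNorm1d/2d/3d` (see the aliases
+ /// below). A minimal usage sketch, mirroring those examples:
+ /// ```
+ /// InstanceNorm1d model(InstanceNormOptions(4).eps(1e-5).momentum(0.1).affine(true));
+ /// ```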
12
+ struct TORCH_API InstanceNormOptions {
13
+ /* implicit */ InstanceNormOptions(int64_t num_features);
14
+
15
+ /// The number of features of the input tensor.
16
+ TORCH_ARG(int64_t, num_features);
17
+
18
+ /// The epsilon value added for numerical stability.
19
+ TORCH_ARG(double, eps) = 1e-5;
20
+
21
+ /// A momentum multiplier for the mean and variance.
22
+ TORCH_ARG(double, momentum) = 0.1;
23
+
24
+ /// Whether to learn a scale and bias that are applied in an affine
25
+ /// transformation on the input.
26
+ TORCH_ARG(bool, affine) = false;
27
+
28
+ /// Whether to store and update batch statistics (mean and variance) in the
29
+ /// module.
30
+ TORCH_ARG(bool, track_running_stats) = false;
31
+ };
32
+
33
+ /// Options for the `InstanceNorm1d` module.
34
+ ///
35
+ /// Example:
36
+ /// ```
37
+ /// InstanceNorm1d
38
+ /// model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
39
+ /// ```
40
+ using InstanceNorm1dOptions = InstanceNormOptions;
41
+
42
+ /// Options for the `InstanceNorm2d` module.
43
+ ///
44
+ /// Example:
45
+ /// ```
46
+ /// InstanceNorm2d
47
+ /// model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
48
+ /// ```
49
+ using InstanceNorm2dOptions = InstanceNormOptions;
50
+
51
+ /// Options for the `InstanceNorm3d` module.
52
+ ///
53
+ /// Example:
54
+ /// ```
55
+ /// InstanceNorm3d
56
+ /// model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
57
+ /// ```
58
+ using InstanceNorm3dOptions = InstanceNormOptions;
59
+
60
+ namespace functional {
61
+
62
+ /// Options for `torch::nn::functional::instance_norm`.
63
+ ///
64
+ /// Example:
65
+ /// ```
66
+ /// namespace F = torch::nn::functional;
67
+ /// F::instance_norm(input,
68
+ /// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5));
69
+ /// ```
70
+ struct TORCH_API InstanceNormFuncOptions {
71
+ TORCH_ARG(Tensor, running_mean) = Tensor();
72
+
73
+ TORCH_ARG(Tensor, running_var) = Tensor();
74
+
75
+ TORCH_ARG(Tensor, weight) = Tensor();
76
+
77
+ TORCH_ARG(Tensor, bias) = Tensor();
78
+
79
+ TORCH_ARG(bool, use_input_stats) = true;
80
+
81
+ TORCH_ARG(double, momentum) = 0.1;
82
+
83
+ TORCH_ARG(double, eps) = 1e-5;
84
+ };
85
+
86
+ } // namespace functional
87
+
88
+ } // namespace nn
89
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h ADDED
@@ -0,0 +1,95 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/types.h>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+
10
+ /// Options for the `Linear` module.
11
+ ///
12
+ /// Example:
13
+ /// ```
14
+ /// Linear model(LinearOptions(5, 2).bias(false));
15
+ /// ```
16
+ struct TORCH_API LinearOptions {
17
+ LinearOptions(int64_t in_features, int64_t out_features);
18
+ /// size of each input sample
19
+ TORCH_ARG(int64_t, in_features);
20
+
21
+ /// size of each output sample
22
+ TORCH_ARG(int64_t, out_features);
23
+
24
+ /// If set to false, the layer will not learn an additive bias. Default: true
25
+ TORCH_ARG(bool, bias) = true;
26
+ };
27
+
28
+ // ============================================================================
29
+
30
+ /// Options for the `Flatten` module.
31
+ ///
32
+ /// Example:
33
+ /// ```
34
+ /// Flatten model(FlattenOptions().start_dim(2).end_dim(4));
35
+ /// ```
36
+ struct TORCH_API FlattenOptions {
37
+ /// first dim to flatten
38
+ TORCH_ARG(int64_t, start_dim) = 1;
39
+ /// last dim to flatten
40
+ TORCH_ARG(int64_t, end_dim) = -1;
41
+ };
42
+
43
+ // ============================================================================
44
+
45
+ /// Options for the `Unflatten` module.
46
+ ///
47
+ /// Note: If input tensor is named, use dimname and namedshape arguments.
48
+ ///
49
+ /// Example:
50
+ /// ```
51
+ /// Unflatten unnamed_model(UnflattenOptions(0, {2, 2}));
52
+ /// Unflatten named_model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}}));
53
+ /// ```
54
+ struct TORCH_API UnflattenOptions {
55
+ typedef std::vector<std::pair<std::string, int64_t>> namedshape_t;
56
+
57
+ UnflattenOptions(int64_t dim, std::vector<int64_t> sizes);
58
+ UnflattenOptions(const char* dimname, namedshape_t namedshape);
59
+ UnflattenOptions(std::string dimname, namedshape_t namedshape);
60
+
61
+ /// dim to unflatten
62
+ TORCH_ARG(int64_t, dim);
63
+ /// name of dim to unflatten, for use with named tensors
64
+ TORCH_ARG(std::string, dimname);
65
+ /// new shape of unflattened dim
66
+ TORCH_ARG(std::vector<int64_t>, sizes);
67
+ /// new shape of unflattened dim with names, for use with named tensors
68
+ TORCH_ARG(namedshape_t, namedshape);
69
+ };
70
+
71
+ // ============================================================================
72
+
73
+ /// Options for the `Bilinear` module.
74
+ ///
75
+ /// Example:
76
+ /// ```
77
+ /// Bilinear model(BilinearOptions(3, 2, 4).bias(false));
78
+ /// ```
79
+ struct TORCH_API BilinearOptions {
80
+ BilinearOptions(
81
+ int64_t in1_features,
82
+ int64_t in2_features,
83
+ int64_t out_features);
84
+ /// The number of features in input 1 (columns of the input1 matrix).
85
+ TORCH_ARG(int64_t, in1_features);
86
+ /// The number of features in input 2 (columns of the input2 matrix).
87
+ TORCH_ARG(int64_t, in2_features);
88
+ /// The number of output features to produce (columns of the output matrix).
89
+ TORCH_ARG(int64_t, out_features);
90
+ /// Whether to learn and add a bias after the bilinear transformation.
91
+ TORCH_ARG(bool, bias) = true;
92
+ };
93
+
94
+ } // namespace nn
95
+ } // namespace torch
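A brief, self-contained sketch of how the option structs above are typically consumed (illustrative only; all tensor shapes are assumptions):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace nn = torch::nn;

  nn::Linear linear(nn::LinearOptions(5, 2).bias(false));
  auto out = linear->forward(torch::randn({3, 5}));             // -> (3, 2)

  nn::Flatten flatten(nn::FlattenOptions().start_dim(1).end_dim(-1));
  auto flat = flatten->forward(torch::randn({3, 4, 5}));        // -> (3, 20)

  nn::Unflatten unflatten(nn::UnflattenOptions(1, {2, 10}));
  auto unflat = unflatten->forward(flat);                       // -> (3, 2, 10)

  nn::Bilinear bilinear(nn::BilinearOptions(3, 2, 4).bias(false));
  auto bi = bilinear->forward(torch::randn({7, 3}), torch::randn({7, 2})); // -> (7, 4)

  std::cout << out.sizes() << " " << flat.sizes() << " " << unflat.sizes()
            << " " << bi.sizes() << '\n';
}
```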
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h ADDED
@@ -0,0 +1,802 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ /// Options for the `L1Loss` module.
12
+ ///
13
+ /// Example:
14
+ /// ```
15
+ /// L1Loss model(L1LossOptions(torch::kNone));
16
+ /// ```
17
+ struct TORCH_API L1LossOptions {
18
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
19
+ reduction_t;
20
+
21
+ TORCH_OPTIONS_CTOR_VARIANT_ARG3(L1LossOptions, reduction, kNone, kMean, kSum)
22
+
23
+ /// Specifies the reduction to apply to the output.
24
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
25
+ };
26
+
27
+ namespace functional {
28
+ /// Options for `torch::nn::functional::l1_loss`.
29
+ ///
30
+ /// See the documentation for `torch::nn::L1LossOptions` class to learn what
31
+ /// arguments are supported.
32
+ ///
33
+ /// Example:
34
+ /// ```
35
+ /// namespace F = torch::nn::functional;
36
+ /// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
37
+ /// ```
38
+ using L1LossFuncOptions = L1LossOptions;
39
+ } // namespace functional
40
+
41
+ // ============================================================================
42
+
43
+ /// Options for the `KLDivLoss` module.
44
+ ///
45
+ /// Example:
46
+ /// ```
47
+ /// KLDivLoss
48
+ /// model(KLDivLossOptions().reduction(torch::kNone).log_target(false));
49
+ /// ```
50
+ struct TORCH_API KLDivLossOptions {
51
+ typedef std::variant<
52
+ enumtype::kNone,
53
+ enumtype::kBatchMean,
54
+ enumtype::kSum,
55
+ enumtype::kMean>
56
+ reduction_t;
57
+
58
+ TORCH_OPTIONS_CTOR_VARIANT_ARG4(
59
+ KLDivLossOptions,
60
+ reduction,
61
+ kNone,
62
+ kBatchMean,
63
+ kSum,
64
+ kMean)
65
+
66
+ /// Specifies the reduction to apply to the output.
67
+ /// ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. Default: ``'mean'``
68
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
69
+
70
+ /// Specifies whether `target` is accepted in the log space. Default: False
71
+ TORCH_ARG(bool, log_target) = false;
72
+ };
73
+
74
+ namespace functional {
75
+ /// Options for `torch::nn::functional::kl_div`.
76
+ ///
77
+ /// See the documentation for `torch::nn::KLDivLossOptions` class to learn what
78
+ /// arguments are supported.
79
+ ///
80
+ /// Example:
81
+ /// ```
82
+ /// namespace F = torch::nn::functional;
83
+ /// F::kl_div(input, target,
84
+ /// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
85
+ /// ```
86
+ using KLDivFuncOptions = KLDivLossOptions;
87
+ } // namespace functional
88
+
89
+ // ============================================================================
90
+
91
+ /// Options for the `MSELoss` module.
92
+ ///
93
+ /// Example:
94
+ /// ```
95
+ /// MSELoss model(MSELossOptions(torch::kNone));
96
+ /// ```
97
+ struct TORCH_API MSELossOptions {
98
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
99
+ reduction_t;
100
+
101
+ TORCH_OPTIONS_CTOR_VARIANT_ARG3(MSELossOptions, reduction, kNone, kMean, kSum)
102
+
103
+ /// Specifies the reduction to apply to the output.
104
+ /// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
105
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
106
+ };
107
+
108
+ namespace functional {
109
+ /// Options for `torch::nn::functional::mse_loss`.
110
+ ///
111
+ /// See the documentation for `torch::nn::MSELossOptions` class to learn what
112
+ /// arguments are supported.
113
+ ///
114
+ /// Example:
115
+ /// ```
116
+ /// namespace F = torch::nn::functional;
117
+ /// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
118
+ /// ```
119
+ using MSELossFuncOptions = MSELossOptions;
120
+ } // namespace functional
121
+
122
+ // ============================================================================
123
+
124
+ /// Options for the `BCELoss` module.
125
+ ///
126
+ /// Example:
127
+ /// ```
128
+ /// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight));
129
+ /// ```
130
+ struct TORCH_API BCELossOptions {
131
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
132
+ reduction_t;
133
+
134
+ /// A manual rescaling weight given to the loss of each batch element.
135
+ TORCH_ARG(Tensor, weight) = {};
136
+ /// Specifies the reduction to apply to the output.
137
+ /// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
138
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
139
+ };
140
+
141
+ namespace functional {
142
+ /// Options for `torch::nn::functional::binary_cross_entropy`.
143
+ ///
144
+ /// See the documentation for `torch::nn::BCELossOptions` class to learn what
145
+ /// arguments are supported.
146
+ ///
147
+ /// Example:
148
+ /// ```
149
+ /// namespace F = torch::nn::functional;
150
+ /// F::binary_cross_entropy(input, target,
151
+ /// F::BinaryCrossEntropyFuncOptions().weight(weight));
152
+ /// ```
153
+ using BinaryCrossEntropyFuncOptions = BCELossOptions;
154
+ } // namespace functional
155
+
156
+ // ============================================================================
157
+
158
+ /// Options for the `HingeEmbeddingLoss` module.
159
+ ///
160
+ /// Example:
161
+ /// ```
162
+ /// HingeEmbeddingLoss
163
+ /// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone));
164
+ /// ```
165
+ struct TORCH_API HingeEmbeddingLossOptions {
166
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
167
+ reduction_t;
168
+
169
+ /// Specifies the threshold that the distance of a negative sample must
170
+ /// reach in order to incur zero loss. Default: 1
171
+ TORCH_ARG(double, margin) = 1.0;
172
+ /// Specifies the reduction to apply to the output. Default: Mean
173
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
174
+ };
175
+
176
+ namespace functional {
177
+ /// Options for `torch::nn::functional::hinge_embedding_loss`.
178
+ ///
179
+ /// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to
180
+ /// learn what arguments are supported.
181
+ ///
182
+ /// Example:
183
+ /// ```
184
+ /// namespace F = torch::nn::functional;
185
+ /// F::hinge_embedding_loss(input, target,
186
+ /// F::HingeEmbeddingLossFuncOptions().margin(2));
187
+ /// ```
188
+ using HingeEmbeddingLossFuncOptions = HingeEmbeddingLossOptions;
189
+ } // namespace functional
190
+
191
+ // ============================================================================
192
+
193
+ /// Options for the `MultiMarginLoss` module.
194
+ ///
195
+ /// Example:
196
+ /// ```
197
+ /// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight));
198
+ /// ```
199
+ struct TORCH_API MultiMarginLossOptions {
200
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
201
+ reduction_t;
202
+
203
+ /// Has a default value of :math:`1`. :math:`1` and :math:`2`
204
+ /// are the only supported values.
205
+ TORCH_ARG(int64_t, p) = 1;
206
+ /// Has a default value of :math:`1`.
207
+ TORCH_ARG(double, margin) = 1.0;
208
+ /// A manual rescaling weight given to each
209
+ /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
210
+ /// treated as if having all ones.
211
+ TORCH_ARG(Tensor, weight) = Tensor();
212
+ /// Specifies the reduction to apply to the output:
213
+ /// ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
214
+ /// applied,
215
+ /// ``'mean'``: the sum of the output will be divided by the number of
216
+ /// elements in the output, ``'sum'``: the output will be summed. Default:
217
+ /// ``'mean'``
218
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
219
+ };
220
+
221
+ namespace functional {
222
+ /// Options for `torch::nn::functional::multi_margin_loss`.
223
+ ///
224
+ /// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn
225
+ /// what arguments are supported.
226
+ ///
227
+ /// Example:
228
+ /// ```
229
+ /// namespace F = torch::nn::functional;
230
+ /// F::multi_margin_loss(input, target,
231
+ /// F::MultiMarginLossFuncOptions().margin(2).weight(weight));
232
+ /// ```
233
+ using MultiMarginLossFuncOptions = MultiMarginLossOptions;
234
+ } // namespace functional
235
+
236
+ // ============================================================================
237
+
238
+ /// Options for the `CosineEmbeddingLoss` module.
239
+ ///
240
+ /// Example:
241
+ /// ```
242
+ /// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5));
243
+ /// ```
244
+ struct TORCH_API CosineEmbeddingLossOptions {
245
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
246
+ reduction_t;
247
+
248
+ /// Specifies the threshold that the distance of a negative sample must
249
+ /// reach in order to incur zero loss. Should be a number from -1 to 1; 0
250
+ /// to 0.5 is suggested. Default: 0.0
251
+ TORCH_ARG(double, margin) = 0.0;
252
+ /// Specifies the reduction to apply to the output. Default: Mean
253
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
254
+ };
255
+
256
+ namespace functional {
257
+ /// Options for `torch::nn::functional::cosine_embedding_loss`.
258
+ ///
259
+ /// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to
260
+ /// learn what arguments are supported.
261
+ ///
262
+ /// Example:
263
+ /// ```
264
+ /// namespace F = torch::nn::functional;
265
+ /// F::cosine_embedding_loss(input1, input2, target,
266
+ /// F::CosineEmbeddingLossFuncOptions().margin(0.5));
267
+ /// ```
268
+ using CosineEmbeddingLossFuncOptions = CosineEmbeddingLossOptions;
269
+ } // namespace functional
270
+
271
+ // ============================================================================
272
+
273
+ /// Options for the `MultiLabelMarginLoss` module.
274
+ ///
275
+ /// Example:
276
+ /// ```
277
+ /// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone));
278
+ /// ```
279
+ struct TORCH_API MultiLabelMarginLossOptions {
280
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
281
+ reduction_t;
282
+
283
+ TORCH_OPTIONS_CTOR_VARIANT_ARG3(
284
+ MultiLabelMarginLossOptions,
285
+ reduction,
286
+ kNone,
287
+ kMean,
288
+ kSum)
289
+
290
+ /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
291
+ /// 'none': no reduction will be applied, 'mean': the sum of the output will
292
+ /// be divided by the number of elements in the output, 'sum': the output will
293
+ /// be summed. Default: 'mean'
294
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
295
+ };
296
+
297
+ namespace functional {
298
+ /// Options for `torch::nn::functional::multilabel_margin_loss`.
299
+ ///
300
+ /// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to
301
+ /// learn what arguments are supported.
302
+ ///
303
+ /// Example:
304
+ /// ```
305
+ /// namespace F = torch::nn::functional;
306
+ /// F::multilabel_margin_loss(input, target,
307
+ /// F::MultilabelMarginLossFuncOptions(torch::kNone));
308
+ /// ```
309
+ using MultilabelMarginLossFuncOptions = MultiLabelMarginLossOptions;
310
+ } // namespace functional
311
+
312
+ // ============================================================================
313
+
314
+ /// Options for the `SoftMarginLoss` module.
315
+ ///
316
+ /// Example:
317
+ /// ```
318
+ /// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone));
319
+ /// ```
320
+ struct TORCH_API SoftMarginLossOptions {
321
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
322
+ reduction_t;
323
+
324
+ TORCH_OPTIONS_CTOR_VARIANT_ARG3(
325
+ SoftMarginLossOptions,
326
+ reduction,
327
+ kNone,
328
+ kMean,
329
+ kSum)
330
+
331
+ /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
332
+ /// 'none': no reduction will be applied, 'mean': the sum of the output will
333
+ /// be divided by the number of elements in the output, 'sum': the output will
334
+ /// be summed. Default: 'mean'
335
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
336
+ };
337
+
338
+ namespace functional {
339
+ /// Options for `torch::nn::functional::soft_margin_loss`.
340
+ ///
341
+ /// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn
342
+ /// what arguments are supported.
343
+ ///
344
+ /// Example:
345
+ /// ```
346
+ /// namespace F = torch::nn::functional;
347
+ /// F::soft_margin_loss(input, target,
348
+ /// F::SoftMarginLossFuncOptions(torch::kNone));
349
+ /// ```
350
+ using SoftMarginLossFuncOptions = SoftMarginLossOptions;
351
+ } // namespace functional
352
+
353
+ // ============================================================================
354
+
355
+ /// Options for the `MultiLabelSoftMarginLoss` module.
356
+ ///
357
+ /// Example:
358
+ /// ```
359
+ /// MultiLabelSoftMarginLoss
360
+ /// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight));
361
+ /// ```
362
+ struct TORCH_API MultiLabelSoftMarginLossOptions {
363
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
364
+ reduction_t;
365
+
366
+ /// A manual rescaling weight given to each
367
+ /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
368
+ /// treated as if having all ones.
369
+ TORCH_ARG(Tensor, weight) = Tensor();
370
+
371
+ /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
372
+ /// 'none': no reduction will be applied, 'mean': the sum of the output will
373
+ /// be divided by the number of elements in the output, 'sum': the output will
374
+ /// be summed. Default: 'mean'
375
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
376
+ };
377
+
378
+ namespace functional {
379
+ /// Options for `torch::nn::functional::multilabel_soft_margin_loss`.
380
+ ///
381
+ /// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class
382
+ /// to learn what arguments are supported.
383
+ ///
384
+ /// Example:
385
+ /// ```
386
+ /// namespace F = torch::nn::functional;
387
+ /// F::multilabel_soft_margin_loss(input, target,
388
+ /// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
389
+ /// ```
390
+ using MultilabelSoftMarginLossFuncOptions = MultiLabelSoftMarginLossOptions;
391
+ } // namespace functional
392
+
393
+ // ============================================================================
394
+
395
+ /// Options for the `TripletMarginLoss` module.
396
+ ///
397
+ /// Example:
398
+ /// ```
399
+ /// TripletMarginLoss
400
+ /// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false));
401
+ /// ```
402
+ struct TORCH_API TripletMarginLossOptions {
403
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
404
+ reduction_t;
405
+
406
+ /// Specifies the threshold that the distance of a negative sample must
407
+ /// reach in order to incur zero loss. Default: 1
408
+ TORCH_ARG(double, margin) = 1.0;
409
+ /// Specifies the norm degree for pairwise distance. Default: 2
410
+ TORCH_ARG(double, p) = 2.0;
411
+ TORCH_ARG(double, eps) = 1e-6;
412
+ /// The distance swap is described in detail in the paper Learning shallow
413
+ /// convolutional feature descriptors with triplet losses by V. Balntas,
414
+ /// E. Riba et al. Default: False
415
+ TORCH_ARG(bool, swap) = false;
416
+ /// Specifies the reduction to apply to the output. Default: Mean
417
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
418
+ };
419
+
420
+ namespace functional {
421
+ /// Options for `torch::nn::functional::triplet_margin_loss`.
422
+ ///
423
+ /// See the documentation for `torch::nn::TripletMarginLossOptions` class to
424
+ /// learn what arguments are supported.
425
+ ///
426
+ /// Example:
427
+ /// ```
428
+ /// namespace F = torch::nn::functional;
429
+ /// F::triplet_margin_loss(anchor, positive, negative,
430
+ /// F::TripletMarginLossFuncOptions().margin(1.0));
431
+ /// ```
432
+ using TripletMarginLossFuncOptions = TripletMarginLossOptions;
433
+ } // namespace functional
434
+
435
+ // ============================================================================
436
+
437
+ /// Options for the `TripletMarginWithDistanceLoss` module.
438
+ ///
439
+ /// Example:
440
+ /// ```
441
+ /// TripletMarginWithDistanceLoss
442
+ /// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false));
443
+ /// ```
444
+ struct TORCH_API TripletMarginWithDistanceLossOptions {
445
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
446
+ reduction_t;
447
+ typedef std::function<Tensor(const Tensor&, const Tensor&)>
448
+ distance_function_t;
449
+
450
+ /// Specifies a nonnegative, real-valued function that quantifies the
451
+ /// closeness of two tensors. If not specified, `F::pairwise_distance` will
452
+ /// be used. Default: nullopt
453
+ TORCH_ARG(c10::optional<distance_function_t>, distance_function) =
454
+ c10::nullopt;
455
+ /// Specifies a nonnegative margin representing the minimum difference
456
+ /// between the positive and negative distances required for the loss to be 0.
457
+ /// Larger margins penalize cases where the negative examples are not distant
458
+ /// enough from the anchors, relative to the positives. Default: 1
459
+ TORCH_ARG(double, margin) = 1.0;
460
+ /// Whether to use the distance swap described in the paper Learning shallow
461
+ /// convolutional feature descriptors with triplet losses by V. Balntas,
462
+ /// E. Riba et al. If True, and if the positive example is closer to the
463
+ /// negative example than the anchor is, swaps the positive example and the
464
+ /// anchor in the loss computation. Default: False
465
+ TORCH_ARG(bool, swap) = false;
466
+ /// Specifies the reduction to apply to the output. Default: Mean
467
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
468
+ };
469
+
470
+ namespace functional {
471
+ /// Options for `torch::nn::functional::triplet_margin_with_distance_loss`.
472
+ ///
473
+ /// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions`
474
+ /// class to learn what arguments are supported.
475
+ ///
476
+ /// Example:
477
+ /// ```
478
+ /// namespace F = torch::nn::functional;
479
+ /// F::triplet_margin_with_distance_loss(anchor, positive, negative,
480
+ /// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
481
+ /// ```
482
+ using TripletMarginWithDistanceLossFuncOptions =
483
+ TripletMarginWithDistanceLossOptions;
484
+ } // namespace functional
485
+
486
+ // ============================================================================
487
+
488
+ /// Options for the `CTCLoss` module.
489
+ ///
490
+ /// Example:
491
+ /// ```
492
+ /// CTCLoss
493
+ /// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum));
494
+ /// ```
495
+ struct TORCH_API CTCLossOptions {
496
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
497
+ reduction_t;
498
+
499
+ /// blank label. Default `0`.
500
+ TORCH_ARG(int64_t, blank) = 0;
501
+ /// Specifies the reduction to apply to the output. Default: Mean
502
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
503
+ /// Whether to zero infinite losses and the associated gradients.
504
+ /// Default: `false`. Infinite losses mainly occur when the inputs are
505
+ /// too short to be aligned to the targets.
506
+ TORCH_ARG(bool, zero_infinity) = false;
507
+ };
508
+
509
+ namespace functional {
510
+ /// Options for `torch::nn::functional::ctc_loss`.
511
+ ///
512
+ /// See the documentation for `torch::nn::CTCLossOptions` class to learn what
513
+ /// arguments are supported.
514
+ ///
515
+ /// Example:
516
+ /// ```
517
+ /// namespace F = torch::nn::functional;
518
+ /// F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
519
+ /// F::CTCLossFuncOptions().reduction(torch::kNone));
520
+ /// ```
521
+ using CTCLossFuncOptions = CTCLossOptions;
522
+ } // namespace functional
523
+
524
+ // ============================================================================
525
+
526
+ /// Options for the `SmoothL1Loss` module.
527
+ ///
528
+ /// Example:
529
+ /// ```
530
+ /// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5));
531
+ /// ```
532
+ struct TORCH_API SmoothL1LossOptions {
533
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
534
+ reduction_t;
535
+
536
+ TORCH_OPTIONS_CTOR_VARIANT_ARG3(
537
+ SmoothL1LossOptions,
538
+ reduction,
539
+ kNone,
540
+ kMean,
541
+ kSum)
542
+
543
+ /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
544
+ /// 'none': no reduction will be applied, 'mean': the sum of the output will
545
+ /// be divided by the number of elements in the output, 'sum': the output will
546
+ /// be summed. Default: 'mean'
547
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
548
+ /// Specifies the threshold at which to change between L1 and L2 loss.
549
+ /// If beta is not specified, a value of 1.0 will be used.
550
+ /// Default: nullopt
551
+ TORCH_ARG(c10::optional<double>, beta) = c10::nullopt;
552
+ };
553
+
554
+ namespace functional {
555
+ /// Options for `torch::nn::functional::smooth_l1_loss`.
556
+ ///
557
+ /// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn
558
+ /// what arguments are supported.
559
+ ///
560
+ /// Example:
561
+ /// ```
562
+ /// namespace F = torch::nn::functional;
563
+ /// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
564
+ /// ```
565
+ using SmoothL1LossFuncOptions = SmoothL1LossOptions;
566
+ } // namespace functional
567
+
568
+ // ============================================================================
569
+
570
+ /// Options for the `HuberLoss` module.
571
+ ///
572
+ /// Example:
573
+ /// ```
574
+ /// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5));
575
+ /// ```
576
+ struct TORCH_API HuberLossOptions {
577
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
578
+ reduction_t;
579
+
580
+ TORCH_OPTIONS_CTOR_VARIANT_ARG3(
581
+ HuberLossOptions,
582
+ reduction,
583
+ kNone,
584
+ kMean,
585
+ kSum)
586
+
587
+ /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
588
+ /// 'none': no reduction will be applied, 'mean': the sum of the output will
589
+ /// be divided by the number of elements in the output, 'sum': the output will
590
+ /// be summed. Default: 'mean'
591
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
592
+ /// Specifies the threshold at which to change between L1 and L2 loss.
593
+ /// Default: 1.0
594
+ TORCH_ARG(double, delta) = 1.0;
595
+ };
596
+
597
+ namespace functional {
598
+ /// Options for `torch::nn::functional::huber_loss`.
599
+ ///
600
+ /// See the documentation for `torch::nn::HuberLossOptions` class to learn what
601
+ /// arguments are supported.
602
+ ///
603
+ /// Example:
604
+ /// ```
605
+ /// namespace F = torch::nn::functional;
606
+ /// F::huber_loss(input, target, F::HuberLossFuncOptions(torch::kNone));
607
+ /// ```
608
+ using HuberLossFuncOptions = HuberLossOptions;
609
+ } // namespace functional
610
+
611
+ // ============================================================================
612
+
613
+ /// Options for the `PoissonNLLLoss` module.
614
+ ///
615
+ /// Example:
616
+ /// ```
617
+ /// PoissonNLLLoss
618
+ /// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum));
619
+ /// ```
620
+ struct TORCH_API PoissonNLLLossOptions {
621
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
622
+ reduction_t;
623
+
624
+ /// if true the loss is computed as `exp(input) - target * input`,
625
+ /// if false the loss is `input - target * log(input + eps)`.
626
+ TORCH_ARG(bool, log_input) = true;
627
+ /// whether to compute full loss, i.e. to add the Stirling approximation term
628
+ /// target * log(target) - target + 0.5 * log(2 * pi * target).
629
+ TORCH_ARG(bool, full) = false;
630
+ /// Small value to avoid evaluation of `log(0)` when `log_input = false`.
631
+ /// Default: 1e-8
632
+ TORCH_ARG(double, eps) = 1e-8;
633
+ /// Specifies the reduction to apply to the output. Default: Mean
634
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
635
+ };
636
+
637
+ namespace functional {
638
+ /// Options for `torch::nn::functional::poisson_nll_loss`.
639
+ ///
640
+ /// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn
641
+ /// what arguments are supported.
642
+ ///
643
+ /// Example:
644
+ /// ```
645
+ /// namespace F = torch::nn::functional;
646
+ /// F::poisson_nll_loss(input, target,
647
+ /// F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
648
+ /// ```
649
+ using PoissonNLLLossFuncOptions = PoissonNLLLossOptions;
650
+ } // namespace functional
651
+
652
+ // ============================================================================
653
+
654
+ /// Options for the `MarginRankingLoss` module.
655
+ ///
656
+ /// Example:
657
+ /// ```
658
+ /// MarginRankingLoss
659
+ /// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum));
660
+ /// ```
661
+ struct TORCH_API MarginRankingLossOptions {
662
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
663
+ reduction_t;
664
+
665
+ /// Has a default value of `0`.
666
+ TORCH_ARG(double, margin) = 0;
667
+ /// Specifies the reduction to apply to the output. Default: Mean
668
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
669
+ };
670
+
671
+ namespace functional {
672
+ /// Options for `torch::nn::functional::margin_ranking_loss`.
673
+ ///
674
+ /// See the documentation for `torch::nn::MarginRankingLossOptions` class to
675
+ /// learn what arguments are supported.
676
+ ///
677
+ /// Example:
678
+ /// ```
679
+ /// namespace F = torch::nn::functional;
680
+ /// F::margin_ranking_loss(input1, input2, target,
681
+ /// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
682
+ /// ```
683
+ using MarginRankingLossFuncOptions = MarginRankingLossOptions;
684
+ } // namespace functional
685
+
686
+ // ============================================================================
687
+
688
+ /// Options for the `NLLLoss` module.
689
+ ///
690
+ /// Example:
691
+ /// ```
692
+ /// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean));
693
+ /// ```
694
+ struct TORCH_API NLLLossOptions {
695
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
696
+ reduction_t;
697
+
698
+ /// A manual rescaling weight given to each
699
+ /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
700
+ /// treated as if having all ones.
701
+ TORCH_ARG(Tensor, weight) = {};
702
+ /// Specifies a target value that is ignored
703
+ /// and does not contribute to the input gradient.
704
+ TORCH_ARG(int64_t, ignore_index) = -100;
705
+ /// Specifies the reduction to apply to the output. Default: Mean
706
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
707
+ };
708
+
709
+ namespace functional {
710
+ /// Options for `torch::nn::functional::nll_loss`.
711
+ ///
712
+ /// See the documentation for `torch::nn::NLLLossOptions` class to learn what
713
+ /// arguments are supported.
714
+ ///
715
+ /// Example:
716
+ /// ```
717
+ /// namespace F = torch::nn::functional;
718
+ /// F::nll_loss(input, target,
719
+ /// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
720
+ /// ```
721
+ using NLLLossFuncOptions = NLLLossOptions;
722
+ } // namespace functional
723
+
724
+ // ============================================================================
725
+
726
+ /// Options for the `CrossEntropyLoss` module.
727
+ ///
728
+ /// Example:
729
+ /// ```
730
+ /// CrossEntropyLoss
731
+ /// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean));
732
+ /// ```
733
+ struct TORCH_API CrossEntropyLossOptions {
734
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
735
+ reduction_t;
736
+
737
+ /// A manual rescaling weight given to each class. If given, has to be a
738
+ /// Tensor of size C
739
+ TORCH_ARG(Tensor, weight) = {};
740
+ /// Specifies a target value that is ignored
741
+ /// and does not contribute to the input gradient.
742
+ TORCH_ARG(int64_t, ignore_index) = -100;
743
+ /// Specifies the reduction to apply to the output. Default: Mean
744
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
745
+ /// Specifies the amount of smoothing when computing the loss. Default: 0.0
746
+ TORCH_ARG(double, label_smoothing) = 0.0;
747
+ };
748
+
749
+ namespace functional {
750
+ /// Options for `torch::nn::functional::cross_entropy`.
751
+ ///
752
+ /// See the documentation for `torch::nn::CrossEntropyLossOptions` class to
753
+ /// learn what arguments are supported.
754
+ ///
755
+ /// Example:
756
+ /// ```
757
+ /// namespace F = torch::nn::functional;
758
+ /// F::cross_entropy(input, target,
759
+ /// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
760
+ /// ```
761
+ using CrossEntropyFuncOptions = CrossEntropyLossOptions;
762
+ } // namespace functional
763
+
764
+ // ============================================================================
765
+
766
+ /// Options for the `BCEWithLogitsLoss` module.
767
+ ///
768
+ /// Example:
769
+ /// ```
770
+ /// BCEWithLogitsLoss
771
+ /// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight));
772
+ /// ```
773
+ struct TORCH_API BCEWithLogitsLossOptions {
774
+ typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
775
+ reduction_t;
776
+ /// A manual rescaling weight given to the loss of each batch element.
777
+ /// If given, has to be a Tensor of size `nbatch`.
778
+ TORCH_ARG(Tensor, weight) = {};
779
+ /// Specifies the reduction to apply to the output. Default: Mean
780
+ TORCH_ARG(reduction_t, reduction) = torch::kMean;
781
+ /// A weight of positive examples.
782
+ /// Must be a vector with length equal to the number of classes.
783
+ TORCH_ARG(Tensor, pos_weight) = {};
784
+ };
785
+
786
+ namespace functional {
787
+ /// Options for `torch::nn::functional::binary_cross_entropy_with_logits`.
788
+ ///
789
+ /// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to
790
+ /// learn what arguments are supported.
791
+ ///
792
+ /// Example:
793
+ /// ```
794
+ /// namespace F = torch::nn::functional;
795
+ /// F::binary_cross_entropy_with_logits(input, target,
796
+ /// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
797
+ /// ```
798
+ using BinaryCrossEntropyWithLogitsFuncOptions = BCEWithLogitsLossOptions;
799
+ } // namespace functional
800
+
801
+ } // namespace nn
802
+ } // namespace torch
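As a quick orientation, here is a minimal sketch exercising two of the loss option structs above. It is illustrative and not part of the diff; batch size, class count, and option values are assumptions.
```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace nn = torch::nn;
  namespace F = torch::nn::functional;

  // Module form: cross entropy over 4 classes with label smoothing.
  nn::CrossEntropyLoss ce(
      nn::CrossEntropyLossOptions().ignore_index(-100).label_smoothing(0.1));
  auto logits = torch::randn({8, 4});
  auto targets = torch::randint(0, 4, {8}, torch::kLong);
  auto ce_loss = ce->forward(logits, targets);

  // Functional form: element-wise (unreduced) smooth L1 loss.
  auto pred = torch::randn({8, 4});
  auto ref = torch::randn({8, 4});
  auto per_element =
      F::smooth_l1_loss(pred, ref, F::SmoothL1LossFuncOptions(torch::kNone));

  std::cout << ce_loss.item<float>() << " " << per_element.sizes() << '\n';
}
```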
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h ADDED
@@ -0,0 +1,192 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/types.h>
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ /// Options for the `LayerNorm` module.
12
+ ///
13
+ /// Example:
14
+ /// ```
15
+ /// LayerNorm model(LayerNormOptions({2,
16
+ /// 2}).elementwise_affine(false).eps(2e-5));
17
+ /// ```
18
+ struct TORCH_API LayerNormOptions {
19
+ /* implicit */ LayerNormOptions(std::vector<int64_t> normalized_shape);
20
+ /// input shape from an expected input.
21
+ TORCH_ARG(std::vector<int64_t>, normalized_shape);
22
+ /// a value added to the denominator for numerical stability. ``Default:
23
+ /// 1e-5``.
24
+ TORCH_ARG(double, eps) = 1e-5;
25
+ /// a boolean value that when set to ``true``, this module
26
+ /// has learnable per-element affine parameters initialized to ones (for
27
+ /// weights) and zeros (for biases). ``Default: true``.
28
+ TORCH_ARG(bool, elementwise_affine) = true;
29
+ };
30
+
31
+ // ============================================================================
32
+
33
+ namespace functional {
34
+
35
+ /// Options for `torch::nn::functional::layer_norm`.
36
+ ///
37
+ /// Example:
38
+ /// ```
39
+ /// namespace F = torch::nn::functional;
40
+ /// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5));
41
+ /// ```
42
+ struct TORCH_API LayerNormFuncOptions {
43
+ /* implicit */ LayerNormFuncOptions(std::vector<int64_t> normalized_shape);
44
+ /// input shape from an expected input.
45
+ TORCH_ARG(std::vector<int64_t>, normalized_shape);
46
+
47
+ TORCH_ARG(Tensor, weight) = {};
48
+
49
+ TORCH_ARG(Tensor, bias) = {};
50
+
51
+ /// a value added to the denominator for numerical stability. ``Default:
52
+ /// 1e-5``.
53
+ TORCH_ARG(double, eps) = 1e-5;
54
+ };
55
+
56
+ } // namespace functional
57
+
58
+ // ============================================================================
59
+
60
+ /// Options for the `LocalResponseNorm` module.
61
+ ///
62
+ /// Example:
63
+ /// ```
64
+ /// LocalResponseNorm
65
+ /// model(LocalResponseNormOptions(2).alpha(0.0002).beta(0.85).k(2.));
66
+ /// ```
67
+ struct TORCH_API LocalResponseNormOptions {
68
+ /* implicit */ LocalResponseNormOptions(int64_t size) : size_(size) {}
69
+ /// number of neighbouring channels used for normalization
70
+ TORCH_ARG(int64_t, size);
71
+
72
+ /// multiplicative factor. Default: 1e-4
73
+ TORCH_ARG(double, alpha) = 1e-4;
74
+
75
+ /// exponent. Default: 0.75
76
+ TORCH_ARG(double, beta) = 0.75;
77
+
78
+ /// additive factor. Default: 1
79
+ TORCH_ARG(double, k) = 1.;
80
+ };
81
+
82
+ namespace functional {
83
+ /// Options for `torch::nn::functional::local_response_norm`.
84
+ ///
85
+ /// See the documentation for `torch::nn::LocalResponseNormOptions` class to
86
+ /// learn what arguments are supported.
87
+ ///
88
+ /// Example:
89
+ /// ```
90
+ /// namespace F = torch::nn::functional;
91
+ /// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
92
+ /// ```
93
+ using LocalResponseNormFuncOptions = LocalResponseNormOptions;
94
+ } // namespace functional
95
+
96
+ // ============================================================================
97
+
98
+ /// Options for the `CrossMapLRN2d` module.
99
+ ///
100
+ /// Example:
101
+ /// ```
102
+ /// CrossMapLRN2d model(CrossMapLRN2dOptions(3).alpha(1e-5).beta(0.1).k(10));
103
+ /// ```
104
+ struct TORCH_API CrossMapLRN2dOptions {
105
+ CrossMapLRN2dOptions(int64_t size);
106
+
107
+ TORCH_ARG(int64_t, size);
108
+
109
+ TORCH_ARG(double, alpha) = 1e-4;
110
+
111
+ TORCH_ARG(double, beta) = 0.75;
112
+
113
+ TORCH_ARG(int64_t, k) = 1;
114
+ };
115
+
116
+ // ============================================================================
117
+
118
+ namespace functional {
119
+
120
+ /// Options for `torch::nn::functional::normalize`.
121
+ ///
122
+ /// Example:
123
+ /// ```
124
+ /// namespace F = torch::nn::functional;
125
+ /// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1));
126
+ /// ```
127
+ struct TORCH_API NormalizeFuncOptions {
128
+ /// The exponent value in the norm formulation. Default: 2.0
129
+ TORCH_ARG(double, p) = 2.0;
130
+ /// The dimension to reduce. Default: 1
131
+ TORCH_ARG(int64_t, dim) = 1;
132
+ /// Small value to avoid division by zero. Default: 1e-12
133
+ TORCH_ARG(double, eps) = 1e-12;
134
+ /// the output tensor. If `out` is used, this
135
+ /// operation won't be differentiable.
136
+ TORCH_ARG(c10::optional<Tensor>, out) = c10::nullopt;
137
+ };
138
+
139
+ } // namespace functional
140
+
141
+ // ============================================================================
142
+
143
+ /// Options for the `GroupNorm` module.
144
+ ///
145
+ /// Example:
146
+ /// ```
147
+ /// GroupNorm model(GroupNormOptions(2, 2).eps(2e-5).affine(false));
148
+ /// ```
149
+ struct TORCH_API GroupNormOptions {
150
+ /* implicit */ GroupNormOptions(int64_t num_groups, int64_t num_channels);
151
+
152
+ /// number of groups to separate the channels into
153
+ TORCH_ARG(int64_t, num_groups);
154
+ /// number of channels expected in input
155
+ TORCH_ARG(int64_t, num_channels);
156
+ /// a value added to the denominator for numerical stability. Default: 1e-5
157
+ TORCH_ARG(double, eps) = 1e-5;
158
+ /// a boolean value that when set to ``true``, this module
159
+ /// has learnable per-channel affine parameters initialized to ones (for
160
+ /// weights) and zeros (for biases). Default: ``true``.
161
+ TORCH_ARG(bool, affine) = true;
162
+ };
163
+
164
+ // ============================================================================
165
+
166
+ namespace functional {
167
+
168
+ /// Options for `torch::nn::functional::group_norm`.
169
+ ///
170
+ /// Example:
171
+ /// ```
172
+ /// namespace F = torch::nn::functional;
173
+ /// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5));
174
+ /// ```
175
+ struct TORCH_API GroupNormFuncOptions {
176
+ /* implicit */ GroupNormFuncOptions(int64_t num_groups);
177
+
178
+ /// number of groups to separate the channels into
179
+ TORCH_ARG(int64_t, num_groups);
180
+
181
+ TORCH_ARG(Tensor, weight) = {};
182
+
183
+ TORCH_ARG(Tensor, bias) = {};
184
+
185
+ /// a value added to the denominator for numerical stability. Default: 1e-5
186
+ TORCH_ARG(double, eps) = 1e-5;
187
+ };
188
+
189
+ } // namespace functional
190
+
191
+ } // namespace nn
192
+ } // namespace torch
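A short illustrative sketch of the normalization options above (not part of the header; the input shape is assumed):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace nn = torch::nn;
  namespace F = torch::nn::functional;

  auto x = torch::randn({4, 6, 8}); // (N, C, L), chosen for illustration

  // LayerNorm over the trailing (6, 8) dimensions.
  nn::LayerNorm layer_norm(nn::LayerNormOptions({6, 8}).eps(1e-5));
  auto ln = layer_norm->forward(x);

  // GroupNorm: 3 groups over the 6 channels.
  nn::GroupNorm group_norm(nn::GroupNormOptions(3, 6).affine(true));
  auto gn = group_norm->forward(x);

  // L2-normalize along the channel dimension.
  auto unit = F::normalize(x, F::NormalizeFuncOptions().p(2).dim(1));

  std::cout << ln.sizes() << " " << gn.sizes() << " " << unit.sizes() << '\n';
}
```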
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h ADDED
@@ -0,0 +1,219 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/expanding_array.h>
7
+ #include <torch/types.h>
8
+
9
+ namespace torch {
10
+ namespace nn {
11
+
12
+ /// Options for a `D`-dimensional ReflectionPad module.
13
+ template <size_t D>
14
+ struct TORCH_API ReflectionPadOptions {
15
+ ReflectionPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}
16
+
17
+ /// The size of the padding.
18
+ /// If it is `int`, uses the same padding in all boundaries.
19
+ /// If it is a 2-`tuple` (for ReflectionPad1d), uses (padding_left,
20
+ /// padding_right). If it is a 4-`tuple` (for ReflectionPad2d), uses
21
+ /// (padding_left, padding_right, padding_top, padding_bottom). If it is a
22
+ /// 6-`tuple` (for ReflectionPad3d), uses (padding_left, padding_right,
23
+ /// padding_top, padding_bottom, padding_front, padding_back).
24
+
25
+ TORCH_ARG(ExpandingArray<D * 2>, padding);
26
+ };
27
+
28
+ /// `ReflectionPadOptions` specialized for the `ReflectionPad1d` module.
29
+ ///
30
+ /// Example:
31
+ /// ```
32
+ /// ReflectionPad1d model(ReflectionPad1dOptions({3, 1}));
33
+ /// ```
34
+ using ReflectionPad1dOptions = ReflectionPadOptions<1>;
35
+
36
+ /// `ReflectionPadOptions` specialized for the `ReflectionPad2d` module.
37
+ ///
38
+ /// Example:
39
+ /// ```
40
+ /// ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0}));
41
+ /// ```
42
+ using ReflectionPad2dOptions = ReflectionPadOptions<2>;
43
+
44
+ /// `ReflectionPadOptions` specialized for the `ReflectionPad3d` module.
45
+ ///
46
+ /// Example:
47
+ /// ```
48
+ /// ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 1}));
49
+ /// ```
50
+ using ReflectionPad3dOptions = ReflectionPadOptions<3>;
51
+
52
+ // ============================================================================
53
+
54
+ /// Options for a `D`-dimensional ReplicationPad module.
55
+ template <size_t D>
56
+ struct TORCH_API ReplicationPadOptions {
57
+ ReplicationPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}
58
+
59
+ /// The size of the padding.
60
+ /// - If it is `int`, uses the same padding in all boundaries.
61
+ /// - If it is a 2-`tuple` (for ReplicationPad1d), uses (padding_left,
62
+ /// padding_right).
63
+ /// - If it is a 4-`tuple` (for ReplicationPad2d), uses (padding_left,
64
+ /// padding_right, padding_top, padding_bottom).
65
+ /// - If it is a 6-`tuple` (for ReplicationPad3d), uses
66
+ /// (padding_left, padding_right, padding_top, padding_bottom,
67
+ /// padding_front, padding_back).
68
+ TORCH_ARG(ExpandingArray<D * 2>, padding);
69
+ };
70
+
71
+ /// `ReplicationPadOptions` specialized for the `ReplicationPad1d` module.
72
+ ///
73
+ /// Example:
74
+ /// ```
75
+ /// ReplicationPad1d model(ReplicationPad1dOptions({3, 1}));
76
+ /// ```
77
+ using ReplicationPad1dOptions = ReplicationPadOptions<1>;
78
+
79
+ /// `ReplicationPadOptions` specialized for the `ReplicationPad2d` module.
80
+ ///
81
+ /// Example:
82
+ /// ```
83
+ /// ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0}));
84
+ /// ```
85
+ using ReplicationPad2dOptions = ReplicationPadOptions<2>;
86
+
87
+ /// `ReplicationPadOptions` specialized for the `ReplicationPad3d` module.
88
+ ///
89
+ /// Example:
90
+ /// ```
91
+ /// ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
92
+ /// ```
93
+ using ReplicationPad3dOptions = ReplicationPadOptions<3>;
94
+
95
+ // ============================================================================
96
+
97
+ template <size_t D>
98
+ struct TORCH_API ZeroPadOptions {
99
+ ZeroPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}
100
+
101
+ /// The size of the padding.
102
+ /// - If it is `int`, uses the same padding in all boundaries.
103
+ /// - If it is a 2-`tuple` (for ZeroPad1d), uses (padding_left,
104
+ /// padding_right).
105
+ /// - If it is a 4-`tuple` (for ZeroPad2d), uses (padding_left, padding_right,
106
+ /// padding_top, padding_bottom).
107
+ /// - If it is a 6-`tuple` (for ZeroPad3d), uses
108
+ /// (padding_left, padding_right, padding_top, padding_bottom,
109
+ /// padding_front, padding_back).
110
+ TORCH_ARG(ExpandingArray<D * 2>, padding);
111
+ };
112
+
113
+ /// `ZeroPadOptions` specialized for the `ZeroPad1d` module.
114
+ ///
115
+ /// Example:
116
+ /// ```
117
+ /// ZeroPad1d model(ZeroPad1dOptions({3, 1}));
118
+ /// ```
119
+ using ZeroPad1dOptions = ZeroPadOptions<1>;
120
+
121
+ /// `ZeroPadOptions` specialized for the `ZeroPad2d` module.
122
+ ///
123
+ /// Example:
124
+ /// ```
125
+ /// ZeroPad2d model(ZeroPad2dOptions({1, 1, 2, 0}));
126
+ /// ```
127
+ using ZeroPad2dOptions = ZeroPadOptions<2>;
128
+
129
+ /// `ZeroPadOptions` specialized for the `ZeroPad3d` module.
130
+ ///
131
+ /// Example:
132
+ /// ```
133
+ /// ZeroPad3d model(ZeroPad3dOptions({1, 2, 1, 2, 1, 2}));
134
+ /// ```
135
+ using ZeroPad3dOptions = ZeroPadOptions<3>;
136
+
137
+ // ============================================================================
138
+
139
+ /// Options for a `D`-dimensional ConstantPad module.
140
+ template <size_t D>
141
+ struct TORCH_API ConstantPadOptions {
142
+ ConstantPadOptions(ExpandingArray<D * 2> padding, double value)
143
+ : padding_(padding), value_(value) {}
144
+
145
+ /// The size of the padding.
146
+ /// - If it is `int`, uses the same padding in all boundaries.
147
+ /// - If it is a 2-`tuple` (for ConstantPad1d), uses (padding_left,
148
+ /// padding_right).
149
+ /// - If it is a 4-`tuple` (for ConstantPad2d), uses (padding_left,
150
+ /// padding_right, padding_top, padding_bottom).
151
+ /// - If it is a 6-`tuple` (for ConstantPad3d), uses
152
+ /// (padding_left, padding_right, padding_top, padding_bottom,
153
+ /// padding_front, padding_back).
154
+ TORCH_ARG(ExpandingArray<D * 2>, padding);
155
+
156
+ /// Fill value for constant padding.
157
+ TORCH_ARG(double, value);
158
+ };
159
+
160
+ /// `ConstantPadOptions` specialized for the `ConstantPad1d` module.
161
+ ///
162
+ /// Example:
163
+ /// ```
164
+ /// ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5));
165
+ /// ```
166
+ using ConstantPad1dOptions = ConstantPadOptions<1>;
167
+
168
+ /// `ConstantPadOptions` specialized for the `ConstantPad2d` module.
169
+ ///
170
+ /// Example:
171
+ /// ```
172
+ /// ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5));
173
+ /// ```
174
+ using ConstantPad2dOptions = ConstantPadOptions<2>;
175
+
176
+ /// `ConstantPadOptions` specialized for the `ConstantPad3d` module.
177
+ ///
178
+ /// Example:
179
+ /// ```
180
+ /// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5));
181
+ /// ```
182
+ using ConstantPad3dOptions = ConstantPadOptions<3>;
183
+
184
+ // ============================================================================
185
+
186
+ namespace functional {
187
+
188
+ /// Options for `torch::nn::functional::pad`.
189
+ ///
190
+ /// Example:
191
+ /// ```
192
+ /// namespace F = torch::nn::functional;
193
+ /// F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1,
194
+ /// 2}).mode(torch::kReplicate));
195
+ /// ```
196
+ struct TORCH_API PadFuncOptions {
197
+ typedef std::variant<
198
+ enumtype::kConstant,
199
+ enumtype::kReflect,
200
+ enumtype::kReplicate,
201
+ enumtype::kCircular>
202
+ mode_t;
203
+
204
+ PadFuncOptions(std::vector<int64_t> pad);
205
+
206
+ /// m-element tuple, where m/2 <= input dimensions and m is even.
207
+ TORCH_ARG(std::vector<int64_t>, pad);
208
+
209
+ /// "constant", "reflect", "replicate" or "circular". Default: "constant"
210
+ TORCH_ARG(mode_t, mode) = torch::kConstant;
211
+
212
+ /// fill value for "constant" padding. Default: 0
213
+ TORCH_ARG(double, value) = 0;
214
+ };
215
+
216
+ } // namespace functional
217
+
218
+ } // namespace nn
219
+ } // namespace torch
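An illustrative sketch of the padding options above (input shape and pad sizes are assumptions, not taken from the header):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace nn = torch::nn;
  namespace F = torch::nn::functional;

  auto x = torch::randn({1, 3, 8, 8}); // (N, C, H, W)

  // Reflection-pad width by (1, 1) and height by (2, 2).
  nn::ReflectionPad2d reflect(nn::ReflectionPad2dOptions({1, 1, 2, 2}));
  auto padded = reflect->forward(x); // -> (1, 3, 12, 10)

  // Functional constant padding: last dim by (1, 2), second-to-last by (2, 1).
  auto filled =
      F::pad(x, F::PadFuncOptions({1, 2, 2, 1}).mode(torch::kConstant).value(0.5));

  std::cout << padded.sizes() << " " << filled.sizes() << '\n';
}
```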
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h ADDED
@@ -0,0 +1,65 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/types.h>
6
+
7
+ namespace torch {
8
+ namespace nn {
9
+
10
+ /// Options for the `PixelShuffle` module.
11
+ ///
12
+ /// Example:
13
+ /// ```
14
+ /// PixelShuffle model(PixelShuffleOptions(5));
15
+ /// ```
16
+ struct TORCH_API PixelShuffleOptions {
17
+ PixelShuffleOptions(int64_t upscale_factor)
18
+ : upscale_factor_(upscale_factor) {}
19
+
20
+ /// Factor to increase spatial resolution by
21
+ TORCH_ARG(int64_t, upscale_factor);
22
+ };
23
+
24
+ /// Options for the `PixelUnshuffle` module.
25
+ ///
26
+ /// Example:
27
+ /// ```
28
+ /// PixelUnshuffle model(PixelUnshuffleOptions(5));
29
+ /// ```
30
+ struct TORCH_API PixelUnshuffleOptions {
31
+ /* implicit */ PixelUnshuffleOptions(int64_t downscale_factor)
32
+ : downscale_factor_(downscale_factor) {}
33
+
34
+ /// Factor to decrease spatial resolution by
35
+ TORCH_ARG(int64_t, downscale_factor);
36
+ };
37
+
38
+ namespace functional {
39
+ /// Options for `torch::nn::functional::pixel_shuffle`.
40
+ ///
41
+ /// See the documentation for `torch::nn::PixelShuffleOptions` class to learn
42
+ /// what arguments are supported.
43
+ ///
44
+ /// Example:
45
+ /// ```
46
+ /// namespace F = torch::nn::functional;
47
+ /// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
48
+ /// ```
49
+ using PixelShuffleFuncOptions = PixelShuffleOptions;
50
+
51
+ /// Options for `torch::nn::functional::pixel_unshuffle`.
52
+ ///
53
+ /// See the documentation for `torch::nn::PixelUnshuffleOptions` class to learn
54
+ /// what arguments are supported.
55
+ ///
56
+ /// Example:
57
+ /// ```
58
+ /// namespace F = torch::nn::functional;
59
+ /// F::pixel_unshuffle(x, F::PixelUnshuffleFuncOptions(2));
60
+ /// ```
61
+ using PixelUnshuffleFuncOptions = PixelUnshuffleOptions;
62
+ } // namespace functional
63
+
64
+ } // namespace nn
65
+ } // namespace torch
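A minimal round-trip sketch of the two option structs above (illustrative; shapes assumed):
```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace nn = torch::nn;

  // (N, C*r*r, H, W) -> (N, C, H*r, W*r) with upscale factor r = 2.
  nn::PixelShuffle shuffle(nn::PixelShuffleOptions(2));
  auto x = torch::randn({1, 8, 4, 4});
  auto up = shuffle->forward(x); // -> (1, 2, 8, 8)

  // PixelUnshuffle with the same factor restores the original layout.
  nn::PixelUnshuffle unshuffle(nn::PixelUnshuffleOptions(2));
  auto down = unshuffle->forward(up); // -> (1, 8, 4, 4)

  std::cout << up.sizes() << " " << down.sizes() << '\n';
}
```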
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h ADDED
@@ -0,0 +1,596 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/expanding_array.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ /// Options for a `D`-dimensional avgpool module.
12
+ template <size_t D>
13
+ struct AvgPoolOptions {
14
+ AvgPoolOptions(ExpandingArray<D> kernel_size)
15
+ : kernel_size_(kernel_size), stride_(kernel_size) {}
16
+
17
+ /// the size of the window to take an average over
18
+ TORCH_ARG(ExpandingArray<D>, kernel_size);
19
+
20
+ /// the stride of the window. Default value is `kernel_size`
21
+ TORCH_ARG(ExpandingArray<D>, stride);
22
+
23
+ /// implicit zero padding to be added on both sides
24
+ TORCH_ARG(ExpandingArray<D>, padding) = 0;
25
+
26
+ /// when True, will use `ceil` instead of `floor` to compute the output shape
27
+ TORCH_ARG(bool, ceil_mode) = false;
28
+
29
+ /// when True, will include the zero-padding in the averaging calculation
30
+ TORCH_ARG(bool, count_include_pad) = true;
31
+
32
+ /// if specified, it will be used as divisor, otherwise size of the pooling
33
+ /// region will be used.
34
+
35
+ TORCH_ARG(c10::optional<int64_t>, divisor_override) = c10::nullopt;
36
+ };
37
+
38
+ /// `AvgPoolOptions` specialized for the `AvgPool1d` module.
39
+ ///
40
+ /// Example:
41
+ /// ```
42
+ /// AvgPool1d model(AvgPool1dOptions(3).stride(2));
43
+ /// ```
44
+ using AvgPool1dOptions = AvgPoolOptions<1>;
45
+
46
+ /// `AvgPoolOptions` specialized for the `AvgPool2d` module.
47
+ ///
48
+ /// Example:
49
+ /// ```
50
+ /// AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2}));
51
+ /// ```
52
+ using AvgPool2dOptions = AvgPoolOptions<2>;
53
+
54
+ /// `AvgPoolOptions` specialized for the `AvgPool3d` module.
55
+ ///
56
+ /// Example:
57
+ /// ```
58
+ /// AvgPool3d model(AvgPool3dOptions(5).stride(2));
59
+ /// ```
60
+ using AvgPool3dOptions = AvgPoolOptions<3>;
61
+
62
+ namespace functional {
63
+ /// Options for `torch::nn::functional::avg_pool1d`.
64
+ ///
65
+ /// See the documentation for `torch::nn::AvgPool1dOptions` class to learn what
66
+ /// arguments are supported.
67
+ ///
68
+ /// Example:
69
+ /// ```
70
+ /// namespace F = torch::nn::functional;
71
+ /// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
72
+ /// ```
73
+ using AvgPool1dFuncOptions = AvgPool1dOptions;
74
+ } // namespace functional
75
+
76
+ namespace functional {
77
+ /// Options for `torch::nn::functional::avg_pool2d`.
78
+ ///
79
+ /// See the documentation for `torch::nn::AvgPool2dOptions` class to learn what
80
+ /// arguments are supported.
81
+ ///
82
+ /// Example:
83
+ /// ```
84
+ /// namespace F = torch::nn::functional;
85
+ /// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
86
+ /// ```
87
+ using AvgPool2dFuncOptions = AvgPool2dOptions;
88
+ } // namespace functional
89
+
90
+ namespace functional {
91
+ /// Options for `torch::nn::functional::avg_pool3d`.
92
+ ///
93
+ /// See the documentation for `torch::nn::AvgPool3dOptions` class to learn what
94
+ /// arguments are supported.
95
+ ///
96
+ /// Example:
97
+ /// ```
98
+ /// namespace F = torch::nn::functional;
99
+ /// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
100
+ /// ```
101
+ using AvgPool3dFuncOptions = AvgPool3dOptions;
102
+ } // namespace functional
103
+
104
+ // ============================================================================
105
+
106
+ /// Options for a `D`-dimensional maxpool module.
107
+ template <size_t D>
108
+ struct MaxPoolOptions {
109
+ MaxPoolOptions(ExpandingArray<D> kernel_size)
110
+ : kernel_size_(kernel_size), stride_(kernel_size) {}
111
+
112
+ /// the size of the window to take a max over
113
+ TORCH_ARG(ExpandingArray<D>, kernel_size);
114
+
115
+ /// the stride of the window. Default value is `kernel_size`
116
+ TORCH_ARG(ExpandingArray<D>, stride);
117
+
118
+ /// implicit zero padding to be added on both sides
119
+ TORCH_ARG(ExpandingArray<D>, padding) = 0;
120
+
121
+ /// a parameter that controls the stride of elements in the window
122
+ TORCH_ARG(ExpandingArray<D>, dilation) = 1;
123
+
124
+ /// when True, will use `ceil` instead of `floor` to compute the output shape
125
+ TORCH_ARG(bool, ceil_mode) = false;
126
+ };
127
+
128
+ /// `MaxPoolOptions` specialized for the `MaxPool1d` module.
129
+ ///
130
+ /// Example:
131
+ /// ```
132
+ /// MaxPool1d model(MaxPool1dOptions(3).stride(2));
133
+ /// ```
134
+ using MaxPool1dOptions = MaxPoolOptions<1>;
135
+
136
+ /// `MaxPoolOptions` specialized for the `MaxPool2d` module.
137
+ ///
138
+ /// Example:
139
+ /// ```
140
+ /// MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2}));
141
+ /// ```
142
+ using MaxPool2dOptions = MaxPoolOptions<2>;
143
+
144
+ /// `MaxPoolOptions` specialized for the `MaxPool3d` module.
145
+ ///
146
+ /// Example:
147
+ /// ```
148
+ /// MaxPool3d model(MaxPool3dOptions(3).stride(2));
149
+ /// ```
150
+ using MaxPool3dOptions = MaxPoolOptions<3>;
151
+
152
+ namespace functional {
153
+ /// Options for `torch::nn::functional::max_pool1d` and
154
+ /// `torch::nn::functional::max_pool1d_with_indices`.
155
+ ///
156
+ /// Example:
157
+ /// ```
158
+ /// namespace F = torch::nn::functional;
159
+ /// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
160
+ /// ```
161
+ using MaxPool1dFuncOptions = MaxPool1dOptions;
162
+ } // namespace functional
163
+
164
+ namespace functional {
165
+ /// Options for `torch::nn::functional::max_pool2d` and
166
+ /// `torch::nn::functional::max_pool2d_with_indices`.
167
+ ///
168
+ /// Example:
169
+ /// ```
170
+ /// namespace F = torch::nn::functional;
171
+ /// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
172
+ /// ```
173
+ using MaxPool2dFuncOptions = MaxPool2dOptions;
174
+ } // namespace functional
175
+
176
+ namespace functional {
177
+ /// Options for `torch::nn::functional::max_pool3d` and
178
+ /// `torch::nn::functional::max_pool3d_with_indices`.
179
+ ///
180
+ /// Example:
181
+ /// ```
182
+ /// namespace F = torch::nn::functional;
183
+ /// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
184
+ /// ```
185
+ using MaxPool3dFuncOptions = MaxPool3dOptions;
186
+ } // namespace functional
187
+
188
+ // ============================================================================
189
+
190
+ /// Options for a `D`-dimensional adaptive maxpool module.
191
+ template <typename output_size_t>
192
+ struct AdaptiveMaxPoolOptions {
193
+ AdaptiveMaxPoolOptions(output_size_t output_size)
194
+ : output_size_(output_size) {}
195
+
196
+ /// the target output size
197
+ TORCH_ARG(output_size_t, output_size);
198
+ };
199
+
200
+ /// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool1d` module.
201
+ ///
202
+ /// Example:
203
+ /// ```
204
+ /// AdaptiveMaxPool1d model(AdaptiveMaxPool1dOptions(3));
205
+ /// ```
206
+ using AdaptiveMaxPool1dOptions = AdaptiveMaxPoolOptions<ExpandingArray<1>>;
207
+
208
+ /// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool2d` module.
209
+ ///
210
+ /// Example:
211
+ /// ```
212
+ /// AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2}));
213
+ /// ```
214
+ using AdaptiveMaxPool2dOptions =
215
+ AdaptiveMaxPoolOptions<ExpandingArrayWithOptionalElem<2>>;
216
+
217
+ /// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool3d` module.
218
+ ///
219
+ /// Example:
220
+ /// ```
221
+ /// AdaptiveMaxPool3d model(AdaptiveMaxPool3dOptions(3));
222
+ /// ```
223
+ using AdaptiveMaxPool3dOptions =
224
+ AdaptiveMaxPoolOptions<ExpandingArrayWithOptionalElem<3>>;
225
+
226
+ namespace functional {
227
+ /// Options for `torch::nn::functional::adaptive_max_pool1d` and
228
+ /// `torch::nn::functional::adaptive_max_pool1d_with_indices`
229
+ ///
230
+ /// Example:
231
+ /// ```
232
+ /// namespace F = torch::nn::functional;
233
+ /// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
234
+ /// ```
235
+ using AdaptiveMaxPool1dFuncOptions = AdaptiveMaxPool1dOptions;
236
+ } // namespace functional
237
+
238
+ namespace functional {
239
+ /// Options for `torch::nn::functional::adaptive_max_pool2d` and
240
+ /// `torch::nn::functional::adaptive_max_pool2d_with_indices`
241
+ ///
242
+ /// Example:
243
+ /// ```
244
+ /// namespace F = torch::nn::functional;
245
+ /// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
246
+ /// ```
247
+ using AdaptiveMaxPool2dFuncOptions = AdaptiveMaxPool2dOptions;
248
+ } // namespace functional
249
+
250
+ namespace functional {
251
+ /// Options for `torch::nn::functional::adaptive_max_pool3d` and
252
+ /// `torch::nn::functional::adaptive_max_pool3d_with_indices`
253
+ ///
254
+ /// Example:
255
+ /// ```
256
+ /// namespace F = torch::nn::functional;
257
+ /// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
258
+ /// ```
259
+ using AdaptiveMaxPool3dFuncOptions = AdaptiveMaxPool3dOptions;
260
+ } // namespace functional
261
+
262
+ // ============================================================================
263
+
264
+ /// Options for a `D`-dimensional adaptive avgpool module.
265
+ template <typename output_size_t>
266
+ struct AdaptiveAvgPoolOptions {
267
+ AdaptiveAvgPoolOptions(output_size_t output_size)
268
+ : output_size_(output_size) {}
269
+
270
+ /// the target output size
271
+ TORCH_ARG(output_size_t, output_size);
272
+ };
273
+
274
+ /// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool1d` module.
275
+ ///
276
+ /// Example:
277
+ /// ```
278
+ /// AdaptiveAvgPool1d model(AdaptiveAvgPool1dOptions(5));
279
+ /// ```
280
+ using AdaptiveAvgPool1dOptions = AdaptiveAvgPoolOptions<ExpandingArray<1>>;
281
+
282
+ /// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool2d` module.
283
+ ///
284
+ /// Example:
285
+ /// ```
286
+ /// AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2}));
287
+ /// ```
288
+ using AdaptiveAvgPool2dOptions =
289
+ AdaptiveAvgPoolOptions<ExpandingArrayWithOptionalElem<2>>;
290
+
291
+ /// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool3d` module.
292
+ ///
293
+ /// Example:
294
+ /// ```
295
+ /// AdaptiveAvgPool3d model(AdaptiveAvgPool3dOptions(3));
296
+ /// ```
297
+ using AdaptiveAvgPool3dOptions =
298
+ AdaptiveAvgPoolOptions<ExpandingArrayWithOptionalElem<3>>;
299
+
300
+ namespace functional {
301
+ /// Options for `torch::nn::functional::adaptive_avg_pool1d`.
302
+ ///
303
+ /// See the documentation for `torch::nn::AdaptiveAvgPool1dOptions` class to
304
+ /// learn what arguments are supported.
305
+ ///
306
+ /// Example:
307
+ /// ```
308
+ /// namespace F = torch::nn::functional;
309
+ /// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
310
+ /// ```
311
+ using AdaptiveAvgPool1dFuncOptions = AdaptiveAvgPool1dOptions;
312
+ } // namespace functional
313
+
314
+ namespace functional {
315
+ /// Options for `torch::nn::functional::adaptive_avg_pool2d`.
316
+ ///
317
+ /// See the documentation for `torch::nn::AdaptiveAvgPool2dOptions` class to
318
+ /// learn what arguments are supported.
319
+ ///
320
+ /// Example:
321
+ /// ```
322
+ /// namespace F = torch::nn::functional;
323
+ /// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
324
+ /// ```
325
+ using AdaptiveAvgPool2dFuncOptions = AdaptiveAvgPool2dOptions;
326
+ } // namespace functional
327
+
328
+ namespace functional {
329
+ /// Options for `torch::nn::functional::adaptive_avg_pool3d`.
330
+ ///
331
+ /// See the documentation for `torch::nn::AdaptiveAvgPool3dOptions` class to
332
+ /// learn what arguments are supported.
333
+ ///
334
+ /// Example:
335
+ /// ```
336
+ /// namespace F = torch::nn::functional;
337
+ /// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
338
+ /// ```
339
+ using AdaptiveAvgPool3dFuncOptions = AdaptiveAvgPool3dOptions;
340
+ } // namespace functional
341
+
342
+ // ============================================================================
343
+
344
+ /// Options for a `D`-dimensional maxunpool module.
345
+ template <size_t D>
346
+ struct MaxUnpoolOptions {
347
+ MaxUnpoolOptions(ExpandingArray<D> kernel_size)
348
+ : kernel_size_(kernel_size), stride_(kernel_size) {}
349
+
350
+ /// the size of the window to take a max over
351
+ TORCH_ARG(ExpandingArray<D>, kernel_size);
352
+
353
+ /// the stride of the window. Default value is `kernel_size`
354
+ TORCH_ARG(ExpandingArray<D>, stride);
355
+
356
+ /// implicit zero padding to be added on both sides
357
+ TORCH_ARG(ExpandingArray<D>, padding) = 0;
358
+ };
359
+
360
+ /// `MaxUnpoolOptions` specialized for the `MaxUnpool1d` module.
361
+ ///
362
+ /// Example:
363
+ /// ```
364
+ /// MaxUnpool1d model(MaxUnpool1dOptions(3).stride(2).padding(1));
365
+ /// ```
366
+ using MaxUnpool1dOptions = MaxUnpoolOptions<1>;
367
+
368
+ /// `MaxUnpoolOptions` specialized for the `MaxUnpool2d` module.
369
+ ///
370
+ /// Example:
371
+ /// ```
372
+ /// MaxUnpool2d model(MaxUnpool2dOptions(3).stride(2).padding(1));
373
+ /// ```
374
+ using MaxUnpool2dOptions = MaxUnpoolOptions<2>;
375
+
376
+ /// `MaxUnpoolOptions` specialized for the `MaxUnpool3d` module.
377
+ ///
378
+ /// Example:
379
+ /// ```
380
+ /// MaxUnpool3d model(MaxUnpool3dOptions(3).stride(2).padding(1));
381
+ /// ```
382
+ using MaxUnpool3dOptions = MaxUnpoolOptions<3>;
383
+
384
+ // ============================================================================
385
+
386
+ namespace functional {
387
+
388
+ /// Options for a `D`-dimensional maxunpool functional.
389
+ template <size_t D>
390
+ struct MaxUnpoolFuncOptions {
391
+ MaxUnpoolFuncOptions(ExpandingArray<D> kernel_size)
392
+ : kernel_size_(kernel_size), stride_(kernel_size) {}
393
+
394
+ /// the size of the window to take a max over
395
+ TORCH_ARG(ExpandingArray<D>, kernel_size);
396
+
397
+ /// the stride of the window. Default value is `kernel_size`
398
+ TORCH_ARG(ExpandingArray<D>, stride);
399
+
400
+ /// implicit zero padding to be added on both sides
401
+ TORCH_ARG(ExpandingArray<D>, padding) = 0;
402
+
403
+ /// the targeted output size
404
+ TORCH_ARG(c10::optional<std::vector<int64_t>>, output_size) = c10::nullopt;
405
+ };
406
+
407
+ /// `MaxUnpoolFuncOptions` specialized for
408
+ /// `torch::nn::functional::max_unpool1d`.
409
+ ///
410
+ /// Example:
411
+ /// ```
412
+ /// namespace F = torch::nn::functional;
413
+ /// F::max_unpool1d(x, indices,
414
+ /// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
415
+ /// ```
416
+ using MaxUnpool1dFuncOptions = MaxUnpoolFuncOptions<1>;
417
+
418
+ /// `MaxUnpoolFuncOptions` specialized for
419
+ /// `torch::nn::functional::max_unpool2d`.
420
+ ///
421
+ /// Example:
422
+ /// ```
423
+ /// namespace F = torch::nn::functional;
424
+ /// F::max_unpool2d(x, indices,
425
+ /// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
426
+ /// ```
427
+ using MaxUnpool2dFuncOptions = MaxUnpoolFuncOptions<2>;
428
+
429
+ /// `MaxUnpoolFuncOptions` specialized for
430
+ /// `torch::nn::functional::max_unpool3d`.
431
+ ///
432
+ /// Example:
433
+ /// ```
434
+ /// namespace F = torch::nn::functional;
435
+ /// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
436
+ /// ```
437
+ using MaxUnpool3dFuncOptions = MaxUnpoolFuncOptions<3>;
438
+
439
+ } // namespace functional
440
+
441
+ // ============================================================================
442
+
443
+ /// Options for a `D`-dimensional fractional maxpool module.
444
+ template <size_t D>
445
+ struct FractionalMaxPoolOptions {
446
+ FractionalMaxPoolOptions(ExpandingArray<D> kernel_size)
447
+ : kernel_size_(kernel_size) {}
448
+
449
+ /// the size of the window to take a max over
450
+ TORCH_ARG(ExpandingArray<D>, kernel_size);
451
+
452
+ /// the target output size of the image
453
+ TORCH_ARG(c10::optional<ExpandingArray<D>>, output_size) = c10::nullopt;
454
+
455
+ /// If one wants to have an output size as a ratio of the input size, this
456
+ /// option can be given. This has to be a number or tuple in the range (0, 1)
457
+ using ExpandingArrayDouble = torch::ExpandingArray<D, double>;
458
+ TORCH_ARG(c10::optional<ExpandingArrayDouble>, output_ratio) = c10::nullopt;
459
+
460
+ TORCH_ARG(torch::Tensor, _random_samples) = Tensor();
461
+ };
462
+
463
+ /// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool2d` module.
464
+ ///
465
+ /// Example:
466
+ /// ```
467
+ /// FractionalMaxPool2d model(FractionalMaxPool2dOptions(5).output_size(1));
468
+ /// ```
469
+ using FractionalMaxPool2dOptions = FractionalMaxPoolOptions<2>;
470
+
471
+ /// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool3d` module.
472
+ ///
473
+ /// Example:
474
+ /// ```
475
+ /// FractionalMaxPool3d model(FractionalMaxPool3dOptions(5).output_size(1));
476
+ /// ```
477
+ using FractionalMaxPool3dOptions = FractionalMaxPoolOptions<3>;
478
+
479
+ namespace functional {
480
+ /// Options for `torch::nn::functional::fractional_max_pool2d` and
481
+ /// `torch::nn::functional::fractional_max_pool2d_with_indices`
482
+ ///
483
+ /// Example:
484
+ /// ```
485
+ /// namespace F = torch::nn::functional;
486
+ /// F::fractional_max_pool2d(x,
487
+ /// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
488
+ /// ```
489
+ using FractionalMaxPool2dFuncOptions = FractionalMaxPool2dOptions;
490
+ } // namespace functional
491
+
492
+ namespace functional {
493
+ /// Options for `torch::nn::functional::fractional_max_pool3d` and
494
+ /// `torch::nn::functional::fractional_max_pool3d_with_indices`
495
+ ///
496
+ /// Example:
497
+ /// ```
498
+ /// namespace F = torch::nn::functional;
499
+ /// F::fractional_max_pool3d(x,
500
+ /// F::FractionalMaxPool3dFuncOptions(3).output_size(2));
501
+ /// ```
502
+ using FractionalMaxPool3dFuncOptions = FractionalMaxPool3dOptions;
503
+ } // namespace functional
504
+
505
+ // ============================================================================
506
+
507
+ /// Options for a `D`-dimensional lppool module.
508
+ template <size_t D>
509
+ struct LPPoolOptions {
510
+ LPPoolOptions(double norm_type, ExpandingArray<D> kernel_size)
511
+ : norm_type_(norm_type),
512
+ kernel_size_(kernel_size),
513
+ stride_(kernel_size) {}
514
+
515
+ TORCH_ARG(double, norm_type);
516
+
517
+ // the size of the window to take an average over
518
+ TORCH_ARG(ExpandingArray<D>, kernel_size);
519
+
520
+ // the stride of the window. Default value is `kernel_size`
521
+ TORCH_ARG(ExpandingArray<D>, stride);
522
+
523
+ // when True, will use `ceil` instead of `floor` to compute the output shape
524
+ TORCH_ARG(bool, ceil_mode) = false;
525
+ };
526
+
527
+ /// `LPPoolOptions` specialized for the `LPPool1d` module.
528
+ ///
529
+ /// Example:
530
+ /// ```
531
+ /// LPPool1d model(LPPool1dOptions(1, 2).stride(5).ceil_mode(true));
532
+ /// ```
533
+ using LPPool1dOptions = LPPoolOptions<1>;
534
+
535
+ /// `LPPoolOptions` specialized for the `LPPool2d` module.
536
+ ///
537
+ /// Example:
538
+ /// ```
539
+ /// LPPool2d model(LPPool2dOptions(1, std::vector<int64_t>({3, 4})).stride({5,
540
+ /// 6}).ceil_mode(true));
541
+ /// ```
542
+ using LPPool2dOptions = LPPoolOptions<2>;
543
+
544
+ /// `LPPoolOptions` specialized for the `LPPool3d` module.
545
+ ///
546
+ /// Example:
547
+ /// ```
548
+ /// LPPool3d model(LPPool3dOptions(1, std::vector<int64_t>({3, 4, 5})).stride(
549
+ /// {5, 6, 7}).ceil_mode(true));
550
+ /// ```
551
+ using LPPool3dOptions = LPPoolOptions<3>;
552
+
553
+ namespace functional {
554
+ /// Options for `torch::nn::functional::lp_pool1d`.
555
+ ///
556
+ /// See the documentation for `torch::nn::LPPool1dOptions` class to learn what
557
+ /// arguments are supported.
558
+ ///
559
+ /// Example:
560
+ /// ```
561
+ /// namespace F = torch::nn::functional;
562
+ /// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
563
+ /// ```
564
+ using LPPool1dFuncOptions = LPPool1dOptions;
565
+ } // namespace functional
566
+
567
+ namespace functional {
568
+ /// Options for `torch::nn::functional::lp_pool2d`.
569
+ ///
570
+ /// See the documentation for `torch::nn::LPPool2dOptions` class to learn what
571
+ /// arguments are supported.
572
+ ///
573
+ /// Example:
574
+ /// ```
575
+ /// namespace F = torch::nn::functional;
576
+ /// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
577
+ /// ```
578
+ using LPPool2dFuncOptions = LPPool2dOptions;
579
+ } // namespace functional
580
+
581
+ namespace functional {
582
+ /// Options for `torch::nn::functional::lp_pool3d`.
583
+ ///
584
+ /// See the documentation for `torch::nn::LPPool3dOptions` class to learn what
585
+ /// arguments are supported.
586
+ ///
587
+ /// Example:
588
+ /// ```
589
+ /// namespace F = torch::nn::functional;
590
+ /// F::lp_pool3d(x, F::LPPool3dFuncOptions(2, {2, 3, 4}).stride(2));
591
+ /// ```
592
+ using LPPool3dFuncOptions = LPPool3dOptions;
593
+ } // namespace functional
594
+
595
+ } // namespace nn
596
+ } // namespace torch
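
A minimal usage sketch for the pooling options above (not part of the diffed header): it assumes a LibTorch build with the `<torch/torch.h>` umbrella header and a made-up input shape, and shows both the module form and the functional `*FuncOptions` aliases.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto input = torch::randn({1, 3, 32, 32});

  // Module form: options are built with the chained setters generated by TORCH_ARG.
  torch::nn::MaxPool2d max_pool(torch::nn::MaxPool2dOptions(3).stride(2).padding(1));
  auto pooled = max_pool(input);  // [1, 3, 16, 16]

  // Functional form: the *FuncOptions aliases accept the same arguments.
  auto averaged = F::avg_pool2d(input, F::AvgPool2dFuncOptions(2).count_include_pad(false));
  std::cout << pooled.sizes() << " " << averaged.sizes() << std::endl;  // [1, 3, 16, 16] [1, 3, 16, 16]
  return 0;
}
```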
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h ADDED
@@ -0,0 +1,236 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ namespace detail {
12
+
13
+ /// Common options for RNN, LSTM and GRU modules.
14
+ struct TORCH_API RNNOptionsBase {
15
+ typedef std::variant<
16
+ enumtype::kLSTM,
17
+ enumtype::kGRU,
18
+ enumtype::kRNN_TANH,
19
+ enumtype::kRNN_RELU>
20
+ rnn_options_base_mode_t;
21
+
22
+ RNNOptionsBase(
23
+ rnn_options_base_mode_t mode,
24
+ int64_t input_size,
25
+ int64_t hidden_size);
26
+
27
+ TORCH_ARG(rnn_options_base_mode_t, mode);
28
+ /// The number of features of a single sample in the input sequence `x`.
29
+ TORCH_ARG(int64_t, input_size);
30
+ /// The number of features in the hidden state `h`.
31
+ TORCH_ARG(int64_t, hidden_size);
32
+ /// The number of recurrent layers (cells) to use.
33
+ TORCH_ARG(int64_t, num_layers) = 1;
34
+ /// Whether a bias term should be added to all linear operations.
35
+ TORCH_ARG(bool, bias) = true;
36
+ /// If true, the input sequence should be provided as `(batch, sequence,
37
+ /// features)`. If false (default), the expected layout is `(sequence, batch,
38
+ /// features)`.
39
+ TORCH_ARG(bool, batch_first) = false;
40
+ /// If non-zero, adds dropout with the given probability to the output of each
41
+ /// RNN layer, except the final layer.
42
+ TORCH_ARG(double, dropout) = 0.0;
43
+ /// Whether to make the RNN bidirectional.
44
+ TORCH_ARG(bool, bidirectional) = false;
45
+ /// Cell projection dimension. If 0, projections are not added. Can only be
46
+ /// used for LSTMs.
47
+ TORCH_ARG(int64_t, proj_size) = 0;
48
+ };
49
+
50
+ } // namespace detail
51
+
52
+ /// Options for the `RNN` module.
53
+ ///
54
+ /// Example:
55
+ /// ```
56
+ /// RNN model(RNNOptions(128,
57
+ /// 64).num_layers(3).dropout(0.2).nonlinearity(torch::kTanh));
58
+ /// ```
59
+ struct TORCH_API RNNOptions {
60
+ typedef std::variant<enumtype::kTanh, enumtype::kReLU> nonlinearity_t;
61
+
62
+ RNNOptions(int64_t input_size, int64_t hidden_size);
63
+
64
+ /// The number of expected features in the input `x`
65
+ TORCH_ARG(int64_t, input_size);
66
+ /// The number of features in the hidden state `h`
67
+ TORCH_ARG(int64_t, hidden_size);
68
+ /// Number of recurrent layers. E.g., setting ``num_layers=2``
69
+ /// would mean stacking two RNNs together to form a `stacked RNN`,
70
+ /// with the second RNN taking in outputs of the first RNN and
71
+ /// computing the final results. Default: 1
72
+ TORCH_ARG(int64_t, num_layers) = 1;
73
+ /// The non-linearity to use. Can be either ``torch::kTanh`` or
74
+ /// ``torch::kReLU``. Default: ``torch::kTanh``
75
+ TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh;
76
+ /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
77
+ /// Default: ``true``
78
+ TORCH_ARG(bool, bias) = true;
79
+ /// If ``true``, then the input and output tensors are provided
80
+ /// as `(batch, seq, feature)`. Default: ``false``
81
+ TORCH_ARG(bool, batch_first) = false;
82
+ /// If non-zero, introduces a `Dropout` layer on the outputs of each
83
+ /// RNN layer except the last layer, with dropout probability equal to
84
+ /// `dropout`. Default: 0
85
+ TORCH_ARG(double, dropout) = 0.0;
86
+ /// If ``true``, becomes a bidirectional RNN. Default: ``false``
87
+ TORCH_ARG(bool, bidirectional) = false;
88
+ };
89
+
90
+ /// Options for the `LSTM` module.
91
+ ///
92
+ /// Example:
93
+ /// ```
94
+ /// LSTM model(LSTMOptions(2,
95
+ /// 4).num_layers(3).batch_first(false).bidirectional(true));
96
+ /// ```
97
+ struct TORCH_API LSTMOptions {
98
+ LSTMOptions(int64_t input_size, int64_t hidden_size);
99
+
100
+ /// The number of expected features in the input `x`
101
+ TORCH_ARG(int64_t, input_size);
102
+ /// The number of features in the hidden state `h`
103
+ TORCH_ARG(int64_t, hidden_size);
104
+ /// Number of recurrent layers. E.g., setting ``num_layers=2``
105
+ /// would mean stacking two LSTMs together to form a `stacked LSTM`,
106
+ /// with the second LSTM taking in outputs of the first LSTM and
107
+ /// computing the final results. Default: 1
108
+ TORCH_ARG(int64_t, num_layers) = 1;
109
+ /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
110
+ /// Default: ``true``
111
+ TORCH_ARG(bool, bias) = true;
112
+ /// If ``true``, then the input and output tensors are provided
113
+ /// as (batch, seq, feature). Default: ``false``
114
+ TORCH_ARG(bool, batch_first) = false;
115
+ /// If non-zero, introduces a `Dropout` layer on the outputs of each
116
+ /// LSTM layer except the last layer, with dropout probability equal to
117
+ /// `dropout`. Default: 0
118
+ TORCH_ARG(double, dropout) = 0.0;
119
+ /// If ``true``, becomes a bidirectional LSTM. Default: ``false``
120
+ TORCH_ARG(bool, bidirectional) = false;
121
+ /// Cell projection dimension. If 0, projections are not added
122
+ TORCH_ARG(int64_t, proj_size) = 0;
123
+ };
124
+
125
+ /// Options for the `GRU` module.
126
+ ///
127
+ /// Example:
128
+ /// ```
129
+ /// GRU model(GRUOptions(2,
130
+ /// 4).num_layers(3).batch_first(false).bidirectional(true));
131
+ /// ```
132
+ struct TORCH_API GRUOptions {
133
+ GRUOptions(int64_t input_size, int64_t hidden_size);
134
+
135
+ /// The number of expected features in the input `x`
136
+ TORCH_ARG(int64_t, input_size);
137
+ /// The number of features in the hidden state `h`
138
+ TORCH_ARG(int64_t, hidden_size);
139
+ /// Number of recurrent layers. E.g., setting ``num_layers=2``
140
+ /// would mean stacking two GRUs together to form a `stacked GRU`,
141
+ /// with the second GRU taking in outputs of the first GRU and
142
+ /// computing the final results. Default: 1
143
+ TORCH_ARG(int64_t, num_layers) = 1;
144
+ /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
145
+ /// Default: ``true``
146
+ TORCH_ARG(bool, bias) = true;
147
+ /// If ``true``, then the input and output tensors are provided
148
+ /// as (batch, seq, feature). Default: ``false``
149
+ TORCH_ARG(bool, batch_first) = false;
150
+ /// If non-zero, introduces a `Dropout` layer on the outputs of each
151
+ /// GRU layer except the last layer, with dropout probability equal to
152
+ /// `dropout`. Default: 0
153
+ TORCH_ARG(double, dropout) = 0.0;
154
+ /// If ``true``, becomes a bidirectional GRU. Default: ``false``
155
+ TORCH_ARG(bool, bidirectional) = false;
156
+ };
157
+
158
+ namespace detail {
159
+
160
+ /// Common options for RNNCell, LSTMCell and GRUCell modules
161
+ struct TORCH_API RNNCellOptionsBase {
162
+ RNNCellOptionsBase(
163
+ int64_t input_size,
164
+ int64_t hidden_size,
165
+ bool bias,
166
+ int64_t num_chunks);
167
+ TORCH_ARG(int64_t, input_size);
168
+ TORCH_ARG(int64_t, hidden_size);
169
+ TORCH_ARG(bool, bias);
170
+ TORCH_ARG(int64_t, num_chunks);
171
+ };
172
+
173
+ } // namespace detail
174
+
175
+ /// Options for the `RNNCell` module.
176
+ ///
177
+ /// Example:
178
+ /// ```
179
+ /// RNNCell model(RNNCellOptions(20,
180
+ /// 10).bias(false).nonlinearity(torch::kReLU));
181
+ /// ```
182
+ struct TORCH_API RNNCellOptions {
183
+ typedef std::variant<enumtype::kTanh, enumtype::kReLU> nonlinearity_t;
184
+
185
+ RNNCellOptions(int64_t input_size, int64_t hidden_size);
186
+
187
+ /// The number of expected features in the input `x`
188
+ TORCH_ARG(int64_t, input_size);
189
+ /// The number of features in the hidden state `h`
190
+ TORCH_ARG(int64_t, hidden_size);
191
+ /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
192
+ /// Default: ``true``
193
+ TORCH_ARG(bool, bias) = true;
194
+ /// The non-linearity to use. Can be either ``torch::kTanh`` or
195
+ /// ``torch::kReLU``. Default: ``torch::kTanh``
196
+ TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh;
197
+ };
198
+
199
+ /// Options for the `LSTMCell` module.
200
+ ///
201
+ /// Example:
202
+ /// ```
203
+ /// LSTMCell model(LSTMCellOptions(20, 10).bias(false));
204
+ /// ```
205
+ struct TORCH_API LSTMCellOptions {
206
+ LSTMCellOptions(int64_t input_size, int64_t hidden_size);
207
+
208
+ /// The number of expected features in the input `x`
209
+ TORCH_ARG(int64_t, input_size);
210
+ /// The number of features in the hidden state `h`
211
+ TORCH_ARG(int64_t, hidden_size);
212
+ /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
213
+ /// Default: ``true``
214
+ TORCH_ARG(bool, bias) = true;
215
+ };
216
+
217
+ /// Options for the `GRUCell` module.
218
+ ///
219
+ /// Example:
220
+ /// ```
221
+ /// GRUCell model(GRUCellOptions(20, 10).bias(false));
222
+ /// ```
223
+ struct TORCH_API GRUCellOptions {
224
+ GRUCellOptions(int64_t input_size, int64_t hidden_size);
225
+
226
+ /// The number of expected features in the input `x`
227
+ TORCH_ARG(int64_t, input_size);
228
+ /// The number of features in the hidden state `h`
229
+ TORCH_ARG(int64_t, hidden_size);
230
+ /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
231
+ /// Default: ``true``
232
+ TORCH_ARG(bool, bias) = true;
233
+ };
234
+
235
+ } // namespace nn
236
+ } // namespace torch
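
A minimal sketch of how these RNN options are typically consumed (not part of the header; the shapes and hyper-parameters are made up): a stacked bidirectional LSTM applied to a `(seq, batch, feature)` input, matching the default `batch_first(false)` layout.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // 2-layer bidirectional LSTM: input_size=10, hidden_size=20.
  torch::nn::LSTM lstm(
      torch::nn::LSTMOptions(10, 20).num_layers(2).bidirectional(true));

  // (seq_len, batch, features) because batch_first defaults to false.
  auto input = torch::randn({5, 3, 10});
  auto [output, state] = lstm->forward(input);
  auto [h_n, c_n] = state;

  std::cout << output.sizes() << std::endl;  // [5, 3, 40]  (hidden_size * 2 directions)
  std::cout << h_n.sizes() << std::endl;     // [4, 3, 20]  (num_layers * 2 directions)
  std::cout << c_n.sizes() << std::endl;     // [4, 3, 20]
  return 0;
}
```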
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h ADDED
@@ -0,0 +1,64 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/types.h>
7
+
8
+ #include <torch/nn/modules/container/any.h>
9
+ #include <torch/nn/options/transformerlayer.h>
10
+
11
+ namespace torch {
12
+ namespace nn {
13
+
14
+ /// Options for the `Transformer` module
15
+ ///
16
+ /// Example:
17
+ /// ```
18
+ /// TransformerOptions options;
19
+ /// TransformerOptions options(16, 4);
20
+ /// auto options = TransformerOptions().d_model(4).nhead(2).dropout(0.0);
21
+ /// ```
22
+ struct TORCH_API TransformerOptions {
23
+ // The following constructors are commonly used
24
+ // Please don't add more unless it proves to be a common usage
25
+ TransformerOptions() = default;
26
+ TransformerOptions(int64_t d_model, int64_t nhead);
27
+ TransformerOptions(
28
+ int64_t d_model,
29
+ int64_t nhead,
30
+ int64_t num_encoder_layers,
31
+ int64_t num_decoder_layers);
32
+
33
+ /// the number of expected features in the encoder/decoder inputs
34
+ /// (default=512)
35
+ TORCH_ARG(int64_t, d_model) = 512;
36
+
37
+ /// the number of heads in the multiheadattention models (default=8)
38
+ TORCH_ARG(int64_t, nhead) = 8;
39
+
40
+ /// the number of sub-encoder-layers in the encoder (default=6)
41
+ TORCH_ARG(int64_t, num_encoder_layers) = 6;
42
+
43
+ /// the number of sub-decoder-layers in the decoder (default=6)
44
+ TORCH_ARG(int64_t, num_decoder_layers) = 6;
45
+
46
+ /// the dimension of the feedforward network model (default=2048)
47
+ TORCH_ARG(int64_t, dim_feedforward) = 2048;
48
+
49
+ /// the dropout value (default=0.1)
50
+ TORCH_ARG(double, dropout) = 0.1;
51
+
52
+ /// the activation function of encoder/decoder intermediate layer
53
+ /// (default=``torch::kReLU``)
54
+ TORCH_ARG(activation_t, activation) = torch::kReLU;
55
+
56
+ /// custom encoder (default=None)
57
+ TORCH_ARG(AnyModule, custom_encoder);
58
+
59
+ /// custom decoder (default=None)
60
+ TORCH_ARG(AnyModule, custom_decoder);
61
+ };
62
+
63
+ } // namespace nn
64
+ } // namespace torch
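
A minimal sketch (not part of the header; dimensions are made up) showing how `TransformerOptions` feeds the `Transformer` module; inputs follow the `(seq, batch, feature)` convention.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // Small model to keep the example light: d_model=32, nhead=4.
  torch::nn::Transformer transformer(
      torch::nn::TransformerOptions(32, 4).num_encoder_layers(2).num_decoder_layers(2));

  auto src = torch::randn({10, 2, 32});  // (source_seq, batch, d_model)
  auto tgt = torch::randn({7, 2, 32});   // (target_seq, batch, d_model)
  auto out = transformer->forward(src, tgt);
  std::cout << out.sizes() << std::endl;  // [7, 2, 32]
  return 0;
}
```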
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformercoder.h ADDED
@@ -0,0 +1,76 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/types.h>
7
+
8
+ #include <torch/nn/modules/container/any.h>
9
+ #include <torch/nn/modules/transformerlayer.h>
10
+
11
+ namespace torch {
12
+ namespace nn {
13
+
14
+ /// Options for the `TransformerEncoder`
15
+ ///
16
+ /// Example:
17
+ /// ```
18
+ /// TransformerEncoderLayer encoderLayer(TransformerEncoderLayerOptions(512,
19
+ /// 8).dropout(0.1)); auto options = TransformerEncoderOptions(encoderLayer,
20
+ /// 6).norm(LayerNorm(LayerNormOptions({2})));
21
+ /// ```
22
+ struct TORCH_API TransformerEncoderOptions {
23
+ // This constructor will keep a shallow copy of encoder_layer, so it keeps all
24
+ // the data in encoder_layer.
25
+ TransformerEncoderOptions(
26
+ TransformerEncoderLayer encoder_layer,
27
+ int64_t num_layers);
28
+ // This constructor will create a new TransformerEncoderLayer obj based on
29
+ // passed in encoder_layer_options.
30
+ TransformerEncoderOptions(
31
+ const TransformerEncoderLayerOptions& encoder_layer_options,
32
+ int64_t num_layers);
33
+
34
+ /// transformer Encoder Layer
35
+ TORCH_ARG(TransformerEncoderLayer, encoder_layer) = nullptr;
36
+
37
+ /// number of encoder layers
38
+ TORCH_ARG(int64_t, num_layers);
39
+
40
+ /// normalization module
41
+ TORCH_ARG(AnyModule, norm);
42
+ };
43
+
44
+ /// Options for the `TransformerDecoder` module.
45
+ ///
46
+ /// Example:
47
+ /// ```
48
+ /// TransformerDecoderLayer decoder_layer(TransformerDecoderLayerOptions(512,
49
+ /// 8).dropout(0.1)); auto options = TransformerDecoderOptions(decoder_layer,
50
+ /// 6)norm(LayerNorm(LayerNormOptions({2}))); TransformerDecoder
51
+ /// transformer_decoder(options);
52
+ /// ```
53
+ struct TORCH_API TransformerDecoderOptions {
54
+ // This constructor will keep the a ref of passed in decoder_layer,
55
+ // so it keeps all the data in decoder_layer.
56
+ TransformerDecoderOptions(
57
+ TransformerDecoderLayer decoder_layer,
58
+ int64_t num_layers);
59
+ // This constructor will create a new TransformerDecoderLayer obj,
60
+ // based on passed in decoder_layer_options.
61
+ TransformerDecoderOptions(
62
+ const TransformerDecoderLayerOptions& decoder_layer_options,
63
+ int64_t num_layers);
64
+
65
+ /// decoder layer to be cloned
66
+ TORCH_ARG(TransformerDecoderLayer, decoder_layer) = nullptr;
67
+
68
+ /// number of decoder layers
69
+ TORCH_ARG(int64_t, num_layers);
70
+
71
+ /// normalization module
72
+ TORCH_ARG(AnyModule, norm);
73
+ };
74
+
75
+ } // namespace nn
76
+ } // namespace torch
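
A minimal sketch (not part of the header) of building a `TransformerEncoder` from layer options plus a final `LayerNorm`, mirroring the doc-comment example above; sizes are made up.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // One layer description; the encoder stacks num_layers copies of it.
  auto layer_opts = torch::nn::TransformerEncoderLayerOptions(64, 8).dropout(0.1);
  torch::nn::TransformerEncoder encoder(
      torch::nn::TransformerEncoderOptions(layer_opts, 3)
          .norm(torch::nn::AnyModule(
              torch::nn::LayerNorm(torch::nn::LayerNormOptions({64})))));

  auto src = torch::randn({10, 2, 64});  // (seq, batch, d_model)
  auto memory = encoder->forward(src);
  std::cout << memory.sizes() << std::endl;  // [10, 2, 64]
  return 0;
}
```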
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformerlayer.h ADDED
@@ -0,0 +1,72 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+
11
+ using activation_t = std::variant<
12
+ enumtype::kReLU,
13
+ enumtype::kGELU,
14
+ std::function<Tensor(const Tensor&)>>;
15
+
16
+ /// Options for the `TransformerEncoderLayer`
17
+ ///
18
+ /// Example:
19
+ /// ```
20
+ /// auto options = TransformerEncoderLayerOptions(512, 8).dropout(0.2);
21
+ /// ```
22
+ struct TORCH_API TransformerEncoderLayerOptions {
23
+ /* implicit */ TransformerEncoderLayerOptions(int64_t d_model, int64_t nhead);
24
+
25
+ /// the number of expected features in the input
26
+ TORCH_ARG(int64_t, d_model);
27
+
28
+ /// the number of heads in the multiheadattention models
29
+ TORCH_ARG(int64_t, nhead);
30
+
31
+ /// the dimension of the feedforward network model, default is 2048
32
+ TORCH_ARG(int64_t, dim_feedforward) = 2048;
33
+
34
+ /// the dropout value, default is 0.1
35
+ TORCH_ARG(double, dropout) = 0.1;
36
+
37
+ /// the activation function of intermediate layer, can be ``torch::kReLU``,
38
+ /// ``torch::kGELU``, or a unary callable. Default: ``torch::kReLU``
39
+ TORCH_ARG(activation_t, activation) = torch::kReLU;
40
+ };
41
+
42
+ // ============================================================================
43
+
44
+ /// Options for the `TransformerDecoderLayer` module.
45
+ ///
46
+ /// Example:
47
+ /// ```
48
+ /// TransformerDecoderLayer model(TransformerDecoderLayerOptions(512,
49
+ /// 8).dropout(0.2));
50
+ /// ```
51
+ struct TORCH_API TransformerDecoderLayerOptions {
52
+ TransformerDecoderLayerOptions(int64_t d_model, int64_t nhead);
53
+
54
+ /// number of expected features in the input
55
+ TORCH_ARG(int64_t, d_model);
56
+
57
+ /// number of heads in the multiheadattention models
58
+ TORCH_ARG(int64_t, nhead);
59
+
60
+ /// dimension of the feedforward network model. Default: 2048
61
+ TORCH_ARG(int64_t, dim_feedforward) = 2048;
62
+
63
+ /// dropout value. Default: 0.1
64
+ TORCH_ARG(double, dropout) = 0.1;
65
+
66
+ /// activation function of intermediate layer, can be ``torch::kGELU``,
67
+ /// ``torch::kReLU``, or a unary callable. Default: ``torch::kReLU``
68
+ TORCH_ARG(activation_t, activation) = torch::kReLU;
69
+ };
70
+
71
+ } // namespace nn
72
+ } // namespace torch
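
A minimal sketch (not part of the header) of a single encoder layer configured through these options; the shapes are made up, and the output keeps the input's shape.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::TransformerEncoderLayer layer(
      torch::nn::TransformerEncoderLayerOptions(64, 8)
          .dim_feedforward(256)
          .activation(torch::kGELU));

  auto src = torch::randn({10, 2, 64});  // (seq, batch, d_model)
  auto out = layer->forward(src);
  std::cout << out.sizes() << std::endl;  // [10, 2, 64]
  return 0;
}
```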
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h ADDED
@@ -0,0 +1,110 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/expanding_array.h>
7
+ #include <torch/types.h>
8
+
9
+ #include <vector>
10
+
11
+ namespace torch {
12
+ namespace nn {
13
+
14
+ /// Options for the `Upsample` module.
15
+ ///
16
+ /// Example:
17
+ /// ```
18
+ /// Upsample
19
+ /// model(UpsampleOptions().scale_factor(std::vector<double>({3})).mode(torch::kLinear).align_corners(false));
20
+ /// ```
21
+ struct TORCH_API UpsampleOptions {
22
+ /// output spatial sizes.
23
+ TORCH_ARG(c10::optional<std::vector<int64_t>>, size) = c10::nullopt;
24
+
25
+ /// multiplier for spatial size.
26
+ TORCH_ARG(c10::optional<std::vector<double>>, scale_factor) = c10::nullopt;
27
+
28
+ /// the upsampling algorithm: one of "nearest", "linear", "bilinear",
29
+ /// "bicubic" and "trilinear". Default: "nearest"
30
+ typedef std::variant<
31
+ enumtype::kNearest,
32
+ enumtype::kLinear,
33
+ enumtype::kBilinear,
34
+ enumtype::kBicubic,
35
+ enumtype::kTrilinear>
36
+ mode_t;
37
+ TORCH_ARG(mode_t, mode) = torch::kNearest;
38
+
39
+ /// if "True", the corner pixels of the input and output tensors are
40
+ /// aligned, thus preserving the values at those pixels. This only has
41
+ /// effect when :attr:`mode` is "linear", "bilinear", "bicubic", or
42
+ /// "trilinear". Default: "False"
43
+ TORCH_ARG(c10::optional<bool>, align_corners) = c10::nullopt;
44
+ };
45
+
46
+ namespace functional {
47
+
48
+ /// Options for `torch::nn::functional::interpolate`.
49
+ ///
50
+ /// Example:
51
+ /// ```
52
+ /// namespace F = torch::nn::functional;
53
+ /// F::interpolate(input,
54
+ /// F::InterpolateFuncOptions().size(std::vector<int64_t>({4})).mode(torch::kNearest));
55
+ /// ```
56
+ struct TORCH_API InterpolateFuncOptions {
57
+ typedef std::variant<
58
+ enumtype::kNearest,
59
+ enumtype::kLinear,
60
+ enumtype::kBilinear,
61
+ enumtype::kBicubic,
62
+ enumtype::kTrilinear,
63
+ enumtype::kArea,
64
+ enumtype::kNearestExact>
65
+ mode_t;
66
+
67
+ /// output spatial sizes.
68
+ TORCH_ARG(c10::optional<std::vector<int64_t>>, size) = c10::nullopt;
69
+
70
+ /// multiplier for spatial size.
71
+ TORCH_ARG(c10::optional<std::vector<double>>, scale_factor) = c10::nullopt;
72
+
73
+ /// the upsampling algorithm: one of "nearest", "linear", "bilinear",
74
+ /// "bicubic", "trilinear", "area", "nearest-exact". Default: "nearest"
75
+ TORCH_ARG(mode_t, mode) = torch::kNearest;
76
+
77
+ /// Geometrically, we consider the pixels of the input and output as squares
78
+ /// rather than points. If set to "True", the input and output tensors are
79
+ /// aligned by the center points of their corner pixels, preserving the values
80
+ /// at the corner pixels. If set to "False", the input and output tensors
81
+ /// are aligned by the corner points of their corner pixels, and the
82
+ /// interpolation uses edge value padding for out-of-boundary values, making
83
+ /// this operation *independent* of input size when `scale_factor` is
84
+ /// kept the same. It is *required* when interpolating mode is "linear",
85
+ /// "bilinear", "bicubic" or "trilinear". Default: "False"
86
+ TORCH_ARG(c10::optional<bool>, align_corners) = c10::nullopt;
87
+
88
+ /// recompute the scale_factor for use in the
89
+ /// interpolation calculation. When `scale_factor` is passed as a parameter,
90
+ /// it is used to compute the `output_size`. If `recompute_scale_factor` is
91
+ /// `true` or not specified, a new `scale_factor` will be computed based on
92
+ /// the output and input sizes for use in the interpolation computation (i.e.
93
+ /// the computation will be identical to if the computed `output_size` were
94
+ /// passed-in explicitly). Otherwise, the passed-in `scale_factor` will be
95
+ /// used in the interpolation computation. Note that when `scale_factor` is
96
+ /// floating-point, the recomputed scale_factor may differ from the one passed
97
+ /// in due to rounding and precision issues.
98
+ TORCH_ARG(c10::optional<bool>, recompute_scale_factor) = c10::nullopt;
99
+
100
+ /// flag to apply anti-aliasing. Using the anti-alias
101
+ /// option together with :attr:`align_corners` set to "False", the interpolation
102
+ /// result would match the Pillow result for downsampling operations. Supported
103
+ /// modes: "bilinear". Default: "False".
104
+ TORCH_ARG(bool, antialias) = false;
105
+ };
106
+
107
+ } // namespace functional
108
+
109
+ } // namespace nn
110
+ } // namespace torch
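
A minimal sketch (not part of the header) contrasting the `Upsample` module with the `interpolate` functional; the input shape is made up.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto input = torch::randn({1, 3, 8, 8});

  // Module form: double the spatial size with bilinear interpolation.
  torch::nn::Upsample up(torch::nn::UpsampleOptions()
                             .scale_factor(std::vector<double>({2.0, 2.0}))
                             .mode(torch::kBilinear)
                             .align_corners(false));
  auto a = up(input);  // [1, 3, 16, 16]

  // Functional form: request an explicit output size instead of a scale factor.
  auto b = F::interpolate(
      input,
      F::InterpolateFuncOptions().size(std::vector<int64_t>({4, 4})).mode(torch::kNearest));
  std::cout << a.sizes() << " " << b.sizes() << std::endl;  // [1, 3, 16, 16] [1, 3, 4, 4]
  return 0;
}
```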
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/vision.h ADDED
@@ -0,0 +1,36 @@
1
+ #pragma once
2
+
3
+ #include <torch/arg.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/enum.h>
6
+ #include <torch/types.h>
7
+
8
+ namespace torch {
9
+ namespace nn {
10
+ namespace functional {
11
+
12
+ /// Options for `torch::nn::functional::grid_sample`.
13
+ ///
14
+ /// Example:
15
+ /// ```
16
+ /// namespace F = torch::nn::functional;
17
+ /// F::grid_sample(input, grid,
18
+ /// F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true));
19
+ /// ```
20
+ struct TORCH_API GridSampleFuncOptions {
21
+ typedef std::variant<enumtype::kBilinear, enumtype::kNearest> mode_t;
22
+ typedef std::
23
+ variant<enumtype::kZeros, enumtype::kBorder, enumtype::kReflection>
24
+ padding_mode_t;
25
+
26
+ /// interpolation mode to calculate output values. Default: Bilinear
27
+ TORCH_ARG(mode_t, mode) = torch::kBilinear;
28
+ /// padding mode for outside grid values. Default: Zeros
29
+ TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros;
30
+ /// if true, the grid extrema (-1 and 1) refer to the center points of the
+ /// input's corner pixels; otherwise they refer to the corner points of those
+ /// pixels. Default: false
31
+ TORCH_ARG(c10::optional<bool>, align_corners) = c10::nullopt;
32
+ };
33
+
34
+ } // namespace functional
35
+ } // namespace nn
36
+ } // namespace torch
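
A minimal sketch (not part of the header) feeding `grid_sample` an identity sampling grid built with `affine_grid`; the shapes and the identity transform are made up for illustration.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto input = torch::randn({1, 1, 4, 4});

  // Identity affine transform -> grid of shape (N, H_out, W_out, 2) with x/y in [-1, 1].
  auto theta = torch::tensor({1.f, 0.f, 0.f, 0.f, 1.f, 0.f}).view({1, 2, 3});
  auto grid = F::affine_grid(theta, {1, 1, 4, 4}, /*align_corners=*/false);

  auto out = F::grid_sample(
      input, grid,
      F::GridSampleFuncOptions()
          .mode(torch::kBilinear)
          .padding_mode(torch::kBorder)
          .align_corners(false));
  std::cout << out.sizes() << std::endl;  // [1, 1, 4, 4]
  return 0;
}
```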
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cache_entry.h ADDED
@@ -0,0 +1,69 @@
1
+ #pragma once
2
+
3
+ #include <Python.h>
4
+
5
+ #ifdef __cplusplus
6
+
7
+ #include <torch/csrc/dynamo/utils.h>
8
+ #include <torch/csrc/utils/pybind.h>
9
+ #include <list>
10
+
11
+ namespace py = pybind11;
12
+
13
+ extern "C" {
14
+
15
+ #endif
16
+
17
+ /*
18
+ Our cache resides on the extra scratch space of the code object. The structure
19
+ of the cache is as follows:
20
+
21
+ -> ExtraState
22
+ -> CacheEntry (list)
23
+ -> check_fn
24
+ -> code
25
+ -> FrameState
26
+
27
+ CacheEntry is a linked list node containing the check_fn for guards
28
+ and the optimized code.
29
+
30
+ The FrameState is a PyDict that enables sharing between different frames. This
31
+ is used to detect dynamism in automatic dynamic shapes.
32
+
33
+ These two are encapsulated into a ExtraState.
34
+ */
35
+
36
+ typedef struct CacheEntry CacheEntry;
37
+ typedef struct ExtraState ExtraState;
38
+
39
+ #ifdef __cplusplus
40
+
41
+ typedef struct VISIBILITY_HIDDEN CacheEntry {
42
+ // check the guards: lambda: <locals of user function>: bool
43
+ py::object check_fn;
44
+ // modified user bytecode (protected by check_fn's guards)
45
+ py::object code;
46
+ // Reference to owning ExtraState
47
+ ExtraState* _owner{nullptr};
48
+ // Reference to this CacheEntry's location in owner's linked list
49
+ std::list<CacheEntry>::iterator _owner_loc;
50
+
51
+ CacheEntry(const py::handle& guarded_code);
52
+ ~CacheEntry();
53
+
54
+ // Warning: returns a reference whose lifetime is controlled by C++
55
+ py::object next();
56
+ } CacheEntry;
57
+
58
+ #endif
59
+
60
+ // Returns borrowed reference
61
+ PyCodeObject* CacheEntry_get_code(CacheEntry* e);
62
+
63
+ // Returns a borrowed reference to CacheEntry as a PyObject
64
+ // Warning: lifetime is controlled by C++
65
+ PyObject* CacheEntry_to_obj(CacheEntry* e);
66
+
67
+ #ifdef __cplusplus
68
+ } // extern "C"
69
+ #endif
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/compiled_autograd.h ADDED
@@ -0,0 +1,713 @@
1
+ #pragma once
2
+ #include <c10/core/impl/TorchDispatchModeTLS.h>
3
+ #include <torch/csrc/autograd/engine.h>
4
+ #include <torch/csrc/autograd/variable_info.h>
5
+ #include <torch/csrc/utils/python_stub.h>
6
+ #include <torch/csrc/utils/torch_dispatch_mode.h>
7
+ #include <typeindex>
8
+ #include <vector>
9
+
10
+ // see [Note: Compiled Autograd]
11
+
12
+ namespace torch::dynamo::autograd {
13
+ using namespace torch::autograd;
14
+
15
+ struct SizeInput {
16
+ // Note: int value is still needed when dynamic to pass as an arg
17
+ enum DynType : uint8_t { STATIC = 0, DYNAMIC = 1 };
18
+ SizeInput(DynType dt, int64_t v) : dyn_type(dt), value(v) {}
19
+ DynType dyn_type;
20
+ int64_t value;
21
+ };
22
+
23
+ struct CacheKeyBuffer {
24
+ CacheKeyBuffer(const uint8_t* key, uint16_t len) : data(new uint8_t[len]) {
25
+ std::memcpy(data.get(), key, len);
26
+ }
27
+ const uint8_t* get() const {
28
+ return data.get();
29
+ }
30
+
31
+ private:
32
+ std::unique_ptr<uint8_t[]> data;
33
+ };
34
+
35
+ struct CacheKey {
36
+ // Key to find the next node in the shadow graph. We use C++ RTTI for the
37
+ // type of the node (ntype), then a key generated with a visitor pattern.
38
+ CacheKey(const std::type_index& ntype, const uint8_t* key, uint16_t len)
39
+ : node_type(ntype), key_size(len), key(key) {}
40
+
41
+ bool operator<(const CacheKey& other) const {
42
+ if (node_type != other.node_type) {
43
+ return node_type < other.node_type;
44
+ }
45
+ if (key_size != other.key_size) {
46
+ return key_size < other.key_size;
47
+ }
48
+ return std::memcmp(key, other.key, key_size) < 0;
49
+ }
50
+
51
+ bool operator==(const CacheKey& other) const {
52
+ return node_type == other.node_type && key_size == other.key_size &&
53
+ std::memcmp(key, other.key, key_size) == 0;
54
+ }
55
+
56
+ size_t hash() const {
57
+ // don't bother hashing the key data, common case 1 cache entry per node
58
+ return std::hash<std::type_index>()(node_type) ^ key_size;
59
+ }
60
+
61
+ std::type_index node_type;
62
+ uint16_t key_size;
63
+ const uint8_t* key;
64
+ };
65
+
66
+ struct NodeCall {
67
+ NodeCall(uint32_t id_, std::shared_ptr<Node> node_)
68
+ : id(id_), node(std::move(node_)) {}
69
+
70
+ void mark_output(int input_nr, int output_idx) {
71
+ graph_output.emplace_back(std::make_pair(input_nr, output_idx));
72
+ }
73
+
74
+ uint32_t id;
75
+ std::shared_ptr<Node> node;
76
+ std::vector<std::pair<int, int>> tensor_pre_hooks;
77
+ std::vector<int> pre_hooks;
78
+ std::vector<int> post_hooks;
79
+ std::vector<int> post_acc_grad_hooks;
80
+ std::vector<std::pair<int, int>> graph_output;
81
+ bool needed = true;
82
+ };
83
+
84
+ struct NodeCalls : public std::unordered_map<Node*, NodeCall> {
85
+ NodeCall& lookup(const std::shared_ptr<Node>& function) {
86
+ auto it = find(function.get());
87
+ if (it == end()) {
88
+ it = emplace(function.get(), NodeCall(_next_id++, function)).first;
89
+ }
90
+ return it->second;
91
+ }
92
+
93
+ private:
94
+ uint32_t _next_id = 0;
95
+ };
96
+
97
+ struct TensorArg {
98
+ // Represents a de-duplicated tensor that will be passed into the graph
99
+ TensorArg(uint32_t i = 0) : id(i) {}
100
+ uint32_t index() const {
101
+ TORCH_INTERNAL_ASSERT(defined());
102
+ return id - 1;
103
+ }
104
+ bool defined() const {
105
+ return id != 0;
106
+ }
107
+ uint32_t id;
108
+ at::Tensor proxy_tensor;
109
+ };
110
+
111
+ struct TensorArgs {
112
+ // Manages a collection of TensorArgs and mappings from Tensors/SavedVariables
113
+ // to them. This also allows us to unpack SavedVariable exactly once and
114
+ // store the unpacked Tensor.
115
+
116
+ TensorArg& lookup(const at::Tensor& tensor, bool create = false) {
117
+ if (!tensor.defined()) {
118
+ return _undefined;
119
+ }
120
+ auto impl = tensor.unsafeGetTensorImpl();
121
+ auto it = _args.find(impl);
122
+ if (it == _args.end()) {
123
+ TORCH_INTERNAL_ASSERT(create && inputs.size() == _next_id - 1);
124
+ it = _args.emplace(impl, TensorArg(_next_id++)).first;
125
+ inputs.emplace_back(tensor);
126
+ }
127
+ return it->second;
128
+ }
129
+
130
+ TensorArg& lookup(const SavedVariable& sv) {
131
+ auto it = _saved_variables.find(&sv);
132
+ TORCH_INTERNAL_ASSERT(it != _saved_variables.end());
133
+ return *it->second;
134
+ }
135
+
136
+ TensorArg& add(const at::Tensor& tensor) {
137
+ return lookup(tensor, true);
138
+ }
139
+
140
+ TensorArg& add(const SavedVariable& sv, const std::shared_ptr<Node>& node) {
141
+ // TODO(jansel): Here we unpack the SavedVariable exactly once. This might
142
+ // fire SavedTensor hooks. In the future we should try to put saved tensor
143
+ // hooks into the graph.
144
+ at::Tensor tensor = sv.unpack(node);
145
+ TensorArg& arg = add(tensor);
146
+ _saved_variables.emplace(&sv, &arg);
147
+ return arg;
148
+ }
149
+
150
+ // the concrete tensors that will get passed into the graph as inputs
151
+ std::vector<at::Tensor> inputs;
152
+
153
+ private:
154
+ std::unordered_map<const c10::TensorImpl*, TensorArg> _args;
155
+ // Every TensorArg from this is actually owned by _args (or _undefined) and
156
+ // that's why we have an un-owned pointer here.
157
+ std::unordered_map<const SavedVariable*, TensorArg*> _saved_variables;
158
+ TensorArg _undefined;
159
+ uint32_t _next_id = 1; // id=0 used by _undefined
160
+ };
161
+
162
+ struct AutogradCompilerCall {
163
+ void add_size_input(const c10::SymInt& s) {
164
+ all_size_inputs.emplace_back(
165
+ SizeInput(default_dyn_type, s.guard_int(__FILE__, __LINE__)));
166
+ }
167
+
168
+ int emplace_hook(c10::SafePyObject&& fn) {
169
+ hooks.emplace_back(std::move(fn));
170
+ return hooks.size() - 1;
171
+ }
172
+
173
+ TensorArgs tensor_args;
174
+ std::vector<SizeInput> all_size_inputs;
175
+ std::vector<int64_t> dyn_size_inputs;
176
+ std::vector<c10::SafePyObject> hooks;
177
+ NodeCalls node_calls;
178
+ SizeInput::DynType default_dyn_type = SizeInput::STATIC;
179
+ };
180
+
181
+ class CompiledNodeArgs {
182
+ // CompiledNodeArgs builds a representation of the constant values found
183
+ // across all the nodes in the compiled graph, via 'collect' overloads. The
184
+ // collected constants are specialized on by concatenation into a cache key.
185
+ // Tensor, symint arguments (which are lifted to become graph inputs rather
186
+ // than specialized on) are forwarded to the compiler and not included in the
187
+ // key.
188
+ public:
189
+ void collect(const TensorArg& t) {
190
+ collect_size(t.id);
191
+ if (t.defined()) {
192
+ const at::Tensor& tensor = _compiler.tensor_args.inputs[t.index()];
193
+ // including these in the cache key means dynamo-level tensor guards can
194
+ // be skipped
195
+ collect(tensor.device());
196
+ collect(tensor.dtype());
197
+ collect(tensor.requires_grad());
198
+ }
199
+ }
200
+
201
+ void collect(const at::Tensor& t) {
202
+ collect(_compiler.tensor_args.add(t));
203
+ }
204
+ void collect(const SavedVariable& t) {
205
+ collect(_compiler.tensor_args.add(t, _node_call.node));
206
+ }
207
+ void collect(const c10::SymInt& t) {
208
+ _compiler.add_size_input(t);
209
+ }
210
+ template <typename T>
211
+ void collect(const std::vector<T>& t) {
212
+ collect_size(t.size());
213
+ for (const T& i : t) {
214
+ collect(i);
215
+ }
216
+ }
217
+ template <typename T>
218
+ void collect(const c10::ArrayRef<T>& t) {
219
+ collect_size(t.size());
220
+ for (const T& i : t) {
221
+ collect(i);
222
+ }
223
+ }
224
+ template <typename T>
225
+ void collect(const c10::OptionalArray<T>& t) {
226
+ collect(t.list);
227
+ }
228
+ template <typename T>
229
+ void collect(const c10::optional<T>& t) {
230
+ if (cond(t.has_value())) {
231
+ collect(*t);
232
+ }
233
+ }
234
+ template <typename A, typename B>
235
+ void collect(const std::pair<A, B>& t) {
236
+ collect(t.first);
237
+ collect(t.second);
238
+ }
239
+ void collect(const c10::Scalar& t) {
240
+ auto type = t.type();
241
+ specialize_on_bytes(type);
242
+ if (type == c10::ScalarType::Double) {
243
+ collect(t.toDouble());
244
+ } else if (type == c10::ScalarType::Long) {
245
+ collect(t.toLong());
246
+ } else if (type == c10::ScalarType::Bool) {
247
+ collect(t.toBool());
248
+ } else if (type == c10::ScalarType::ComplexDouble) {
249
+ auto c = t.toComplexDouble();
250
+ collect(c.real());
251
+ collect(c.imag());
252
+ } else {
253
+ TORCH_INTERNAL_ASSERT(false);
254
+ }
255
+ }
256
+ void collect(const c10::TensorOptions& t) {
257
+ collect(t.device());
258
+ collect(t.dtype());
259
+ collect(t.layout());
260
+ collect(t.requires_grad());
261
+ collect(t.pinned_memory());
262
+ collect(t.memory_format_opt());
263
+ }
264
+ void collect(const at::TensorGeometry& t) {
265
+ collect(t.sym_sizes());
266
+ collect(t.sym_strides());
267
+ collect(t.sym_storage_offset());
268
+ }
269
+ void collect(const torch::autograd::TypeAndSize& t) {
270
+ collect(t.sym_sizes);
271
+ collect(t.options);
272
+ }
273
+ void collect(const c10::Device& t) {
274
+ collect(t.type());
275
+ collect(t.index());
276
+ }
277
+ void collect(const std::string& t) {
278
+ collect_size(t.size());
279
+ for (char c : t) {
280
+ collect(c);
281
+ }
282
+ }
283
+ void collect(const caffe2::TypeMeta& t) {
284
+ specialize_on_bytes(t.id());
285
+ }
286
+ void collect(const std::shared_ptr<Node>& t) {
287
+ // Note: this is only capturing the ID of the node not everything
288
+ // contained inside it. This is used for tracking connections between
289
+ // nodes and the actual details of the node itself must be handled by
290
+ // a seperate call to `node->compiled_args()`.
291
+ if (cond((bool)t)) {
292
+ collect(_compiler.node_calls.lookup(t));
293
+ }
294
+ }
295
+ void collect(const NodeCall& t) {
296
+ collect_size(t.id);
297
+ collect(t.graph_output);
298
+ collect_hooks_from(t.node.get());
299
+ }
300
+ void collect(const Edge& t) {
301
+ if (cond(t.is_valid())) {
302
+ collect_size(_compiler.node_calls.lookup(t.function).id);
303
+ collect_size(t.input_nr);
304
+ collect(t.function->input_metadata(t.input_nr)); // for validate_outputs
305
+ }
306
+ }
307
+ void collect(const InputMetadata& t) {
308
+ TORCH_CHECK(!t.is_nested_tensor(), "NestedTensor not implemented");
309
+ collect(t.options());
310
+ collect(t.is_tensor_subclass());
311
+ collect(t.shape_as_dim_vector());
312
+ }
313
+ void collect(const VariableInfo& t) {
314
+ collect(t.layout);
315
+ collect(t.device);
316
+ collect(t.scalar_type);
317
+ collect(t.size);
318
+ collect(t.requires_grad);
319
+ collect(t.is_empty);
320
+ }
321
+ bool cond(bool cond) {
322
+ collect(cond);
323
+ return cond;
324
+ }
325
+
326
+ #define COLLECT_AS_BYTES(T) \
327
+ void collect(T t) { \
328
+ specialize_on_bytes(t); \
329
+ }
330
+ COLLECT_AS_BYTES(c10::ScalarType);
331
+ COLLECT_AS_BYTES(c10::DeviceType);
332
+ COLLECT_AS_BYTES(c10::Layout);
333
+ COLLECT_AS_BYTES(c10::MemoryFormat);
334
+ COLLECT_AS_BYTES(int8_t);
335
+ COLLECT_AS_BYTES(int16_t);
336
+ COLLECT_AS_BYTES(int32_t);
337
+ COLLECT_AS_BYTES(int64_t);
338
+ COLLECT_AS_BYTES(uint8_t);
339
+ COLLECT_AS_BYTES(uint16_t);
340
+ COLLECT_AS_BYTES(uint32_t);
341
+ COLLECT_AS_BYTES(uint64_t);
342
+ COLLECT_AS_BYTES(bool);
343
+ COLLECT_AS_BYTES(float);
344
+ COLLECT_AS_BYTES(double);
345
+ #undef COLLECT_AS_BYTES
346
+
347
+ void collect_hooks_from(Node* fn) {
348
+ TORCH_CHECK(
349
+ fn->retains_grad_hooks().empty(),
350
+ "retains_grad_hooks not implemented for compiled autograd");
351
+ for (auto& i : fn->tensor_pre_hooks()) {
352
+ i->compiled_args(*this);
353
+ }
354
+ for (auto& i : fn->pre_hooks()) {
355
+ i->compiled_args(*this);
356
+ }
357
+ for (auto& i : fn->post_hooks()) {
358
+ i->compiled_args(*this);
359
+ }
360
+ collect_size(_node_call.tensor_pre_hooks.size());
361
+ collect_size(_node_call.pre_hooks.size());
362
+ collect_size(_node_call.post_hooks.size());
363
+ for (const auto& h : _node_call.tensor_pre_hooks) {
364
+ collect_size(h.second); // index
365
+ }
366
+ }
367
+
368
+ CacheKey key() const {
369
+ Node* node = _node_call.node.get();
370
+ return CacheKey(
371
+ typeid(*node), _specialization_key, _specialization_key_size);
372
+ }
373
+
374
+ int add_backward(c10::SafePyObject&& obj) {
375
+ return _compiler.emplace_hook(std::move(obj));
376
+ }
377
+
378
+ int add_backward_state(c10::SafePyObject&& obj) {
379
+ return _compiler.emplace_hook(std::move(obj));
380
+ }
381
+
382
+ void add_tensor_pre_hook(c10::SafePyObject&& obj, int index) {
383
+ auto fn_id = _compiler.emplace_hook(std::move(obj));
384
+ collect_size(fn_id);
385
+ _node_call.tensor_pre_hooks.emplace_back(std::make_pair(fn_id, index));
386
+ }
387
+
388
+ void add_pre_hook(c10::SafePyObject&& obj) {
389
+ auto fn_id = _compiler.emplace_hook(std::move(obj));
390
+ collect_size(fn_id);
391
+ _node_call.pre_hooks.emplace_back(fn_id);
392
+ }
393
+
394
+ void add_post_hook(c10::SafePyObject&& obj) {
395
+ auto fn_id = _compiler.emplace_hook(std::move(obj));
396
+ collect_size(fn_id);
397
+ _node_call.post_hooks.emplace_back(fn_id);
398
+ }
399
+
400
+ void add_post_acc_grad_hook(c10::SafePyObject&& obj) {
401
+ auto fn_id = _compiler.emplace_hook(std::move(obj));
402
+ collect_size(fn_id);
403
+ _node_call.post_acc_grad_hooks.emplace_back(fn_id);
404
+ }
405
+
406
+ void collect_size(size_t s) {
407
+ // we expect sizes to be small, so try to cram them into a single byte
408
+ constexpr uint8_t encode_as_u64 = std::numeric_limits<uint8_t>::max();
409
+ constexpr uint8_t encode_as_u32 = encode_as_u64 - 1;
410
+ constexpr uint8_t encode_as_u16 = encode_as_u64 - 2;
411
+ if (C10_UNLIKELY(s >= encode_as_u16)) {
412
+ // first write a byte indicating the path we followed, then the data
413
+ if (s <= std::numeric_limits<uint16_t>::max()) {
414
+ // 3 bytes
415
+ specialize_on_bytes(encode_as_u16);
416
+ specialize_on_bytes(static_cast<uint16_t>(s));
417
+ } else if (s <= std::numeric_limits<uint32_t>::max()) {
418
+ // 5 bytes
419
+ specialize_on_bytes(encode_as_u32);
420
+ specialize_on_bytes(static_cast<uint32_t>(s));
421
+ } else {
422
+ // 9 bytes
423
+ specialize_on_bytes(encode_as_u64);
424
+ specialize_on_bytes(s);
425
+ }
426
+ } else {
427
+ // happy case, 1 byte
428
+ specialize_on_bytes(static_cast<uint8_t>(s));
429
+ }
430
+ }
431
+
432
+ SizeInput::DynType set_default_dyn_type(SizeInput::DynType default_dyn_type) {
433
+ return std::exchange(_compiler.default_dyn_type, default_dyn_type);
434
+ }
435
+
436
+ CompiledNodeArgs(AutogradCompilerCall& compiler, NodeCall& node_call)
437
+ : _compiler(compiler),
438
+ _node_call(node_call),
439
+ _specialization_key_size(0),
440
+ _specialization_key_storage(1024),
441
+ _specialization_key(
442
+ (uint8_t*)std::malloc(_specialization_key_storage)) {}
443
+ ~CompiledNodeArgs() {
444
+ std::free(_specialization_key);
445
+ }
446
+ CompiledNodeArgs(const CompiledNodeArgs&) = delete;
447
+
448
+ private:
449
+ template <typename T>
450
+ void specialize_on_bytes(const T& t) {
451
+ while (C10_UNLIKELY(
452
+ _specialization_key_size + sizeof(T) > _specialization_key_storage)) {
453
+ _specialization_key_storage *= 2;
454
+ _specialization_key = (uint8_t*)std::realloc(
455
+ _specialization_key, _specialization_key_storage);
456
+ }
457
+ std::memcpy(_specialization_key + _specialization_key_size, &t, sizeof(T));
458
+ _specialization_key_size += sizeof(T);
459
+ }
460
+
461
+ AutogradCompilerCall& _compiler;
462
+ NodeCall& _node_call;
463
+ size_t _specialization_key_size;
464
+ size_t _specialization_key_storage;
465
+ uint8_t* _specialization_key;
466
+ };
467
+
468
+ struct TraceState {
469
+ TraceState(
470
+ const std::vector<c10::optional<c10::SymInt>>& ss,
471
+ size_t num_outputs)
472
+ : sym_sizes_index(0), sym_sizes(ss), outputs(num_outputs) {}
473
+
474
+ void debug_asserts() {
475
+ TORCH_INTERNAL_ASSERT(sym_sizes_index == sym_sizes.size());
476
+ }
477
+ c10::optional<c10::SymInt> next_sym_size() {
478
+ TORCH_INTERNAL_ASSERT(sym_sizes_index < sym_sizes.size());
479
+ return sym_sizes[sym_sizes_index++];
480
+ }
481
+
482
+ size_t sym_sizes_index;
483
+ std::vector<c10::optional<c10::SymInt>> sym_sizes;
484
+ variable_list outputs;
485
+ };
486
+
487
+ class SwapSavedVariables {
488
+ // SwapSavedVariables is used during the tracing/compilation phase after a
489
+ // cache-miss. It swaps any 'lifted' inputs (tensors, symints) to proxy nodes,
490
+ // allows tracing to happen, then swaps them back afterwards.
491
+ public:
492
+ void before(at::Tensor& t) {
493
+ TensorArg& arg = compiler.tensor_args.lookup(t);
494
+ stashed_tensors.save(&t, std::move(t));
495
+ if (arg.defined()) {
496
+ TORCH_INTERNAL_ASSERT(arg.proxy_tensor.defined());
497
+ t = arg.proxy_tensor;
498
+ }
499
+ }
500
+ void after(at::Tensor& t) {
501
+ stashed_tensors.restore(&t);
502
+ }
503
+
504
+ void before(SavedVariable& t) {
505
+ TensorArg& arg = compiler.tensor_args.lookup(t);
506
+ stashed_variables.save(&t, std::move(t));
507
+ if (arg.defined()) {
508
+ TORCH_INTERNAL_ASSERT(arg.proxy_tensor.defined());
509
+ t = SavedVariable(arg.proxy_tensor, false);
510
+ }
511
+ }
512
+ void after(SavedVariable& t) {
513
+ stashed_variables.restore(&t);
514
+ }
515
+
516
+ void before(c10::SymInt& t) {
517
+ stashed_symints.save(&t, c10::SymInt(t));
518
+ auto opt_value = state.next_sym_size();
519
+ if (opt_value.has_value()) {
520
+ t = *opt_value; // dynamic shape
521
+ }
522
+ }
523
+ void after(c10::SymInt& t) {
524
+ stashed_symints.restore(&t);
525
+ }
526
+
527
+ void before(Edge& t) {
528
+ if (t.is_valid()) {
529
+ // need for symints used by validate_outputs
530
+ before(t.function->mutable_input_metadata(t.input_nr));
531
+ }
532
+ }
533
+ void after(Edge& t) {
534
+ if (t.is_valid()) {
535
+ after(t.function->mutable_input_metadata(t.input_nr));
536
+ }
537
+ }
538
+ void before(InputMetadata& t) {
539
+ before(t.mutable_shape_as_dim_vector());
540
+ }
541
+ void after(InputMetadata& t) {
542
+ after(t.mutable_shape_as_dim_vector());
543
+ }
544
+ void before(at::TensorGeometry& t) {
545
+ before(t.mutable_sizes());
546
+ before(t.mutable_strides());
547
+ before(t.mutable_storage_offset());
548
+ t.recompute();
549
+ }
550
+ void after(at::TensorGeometry& t) {
551
+ after(t.mutable_sizes());
552
+ after(t.mutable_strides());
553
+ after(t.mutable_storage_offset());
554
+ t.recompute();
555
+ }
556
+ void before(torch::autograd::TypeAndSize& t) {
557
+ before(t.sym_sizes);
558
+ before(t.options);
559
+ }
560
+ void after(torch::autograd::TypeAndSize& t) {
561
+ after(t.sym_sizes);
562
+ after(t.options);
563
+ }
564
+ void before(VariableInfo& t) {
565
+ before(t.size);
566
+ }
567
+ void after(VariableInfo& t) {
568
+ after(t.size);
569
+ }
570
+
571
+ template <typename T>
572
+ void before(std::vector<T>& t) {
573
+ for (T& i : t) {
574
+ before(i);
575
+ }
576
+ }
577
+ template <typename T>
578
+ void after(std::vector<T>& t) {
579
+ for (T& i : t) {
580
+ after(i);
581
+ }
582
+ }
583
+ template <typename T, unsigned N>
584
+ void before(c10::SmallVector<T, N>& t) {
585
+ for (T& i : t) {
586
+ before(i);
587
+ }
588
+ }
589
+ template <typename T, unsigned N>
590
+ void after(c10::SmallVector<T, N>& t) {
591
+ for (T& i : t) {
592
+ after(i);
593
+ }
594
+ }
595
+
596
+ template <typename T>
597
+ void before(c10::OptionalArray<T>& t) {
598
+ before(t.list);
599
+ }
600
+ template <typename T>
601
+ void after(c10::OptionalArray<T>& t) {
602
+ after(t.list);
603
+ }
604
+
605
+ template <typename T>
606
+ void before(c10::optional<T>& t) {
607
+ if (t.has_value()) {
608
+ before(*t);
609
+ }
610
+ }
611
+ template <typename T>
612
+ void after(c10::optional<T>& t) {
613
+ if (t.has_value()) {
614
+ after(*t);
615
+ }
616
+ }
617
+
618
+ #define NO_OP_VISIT(T) \
619
+ void before(const T&) {} \
620
+ void after(const T&) {}
621
+ NO_OP_VISIT(caffe2::TypeMeta);
622
+ NO_OP_VISIT(c10::Device);
623
+ NO_OP_VISIT(c10::DeviceType);
624
+ NO_OP_VISIT(c10::Layout);
625
+ NO_OP_VISIT(c10::MemoryFormat);
626
+ NO_OP_VISIT(c10::ScalarType);
627
+ NO_OP_VISIT(c10::Scalar);
628
+ NO_OP_VISIT(c10::TensorOptions);
629
+ NO_OP_VISIT(std::string);
630
+ NO_OP_VISIT(int64_t);
631
+ NO_OP_VISIT(bool);
632
+ NO_OP_VISIT(double);
633
+ #undef NO_OP_VISIT
634
+
635
+ SwapSavedVariables(
636
+ AutogradCompilerCall& c,
637
+ TraceState& s,
638
+ PyObject* p,
639
+ const NodeCall& n)
640
+ : compiler(c), state(s), py_compiler(p), curr_node_call(n) {}
641
+
642
+ PyObject* get_py_compiler() {
643
+ return py_compiler;
644
+ }
645
+
646
+ const NodeCall& get_curr_node_call() {
647
+ return curr_node_call;
648
+ }
649
+
650
+ void debug_asserts() {
651
+ stashed_variables.debug_assert();
652
+ stashed_tensors.debug_assert();
653
+ stashed_symints.debug_assert();
654
+ }
655
+
656
+ private:
657
+ template <typename T>
658
+ struct Stashed {
659
+ Stashed(T&& v) : prior_value(std::move(v)) {}
660
+ T prior_value;
661
+ // Note: we need count here to support duplicate calls to before()
662
+ // which happen when we have multiple autograd::Edge objects pointing
663
+ // to the same autograd::Node
664
+ int count = 1;
665
+ };
666
+
667
+ template <typename T>
668
+ struct StashedVars : public std::unordered_map<const T*, Stashed<T>> {
669
+ void save(const T* key, T&& value) {
670
+ auto it = this->find(key);
671
+ if (it == this->end()) {
672
+ this->emplace(key, std::move(value));
673
+ } else {
674
+ // keep the value from the prior save()
675
+ it->second.count++;
676
+ }
677
+ }
678
+ void restore(T* var) {
679
+ auto it = this->find(var);
680
+ TORCH_INTERNAL_ASSERT(it != this->end(), "missing before()");
681
+ if (--it->second.count == 0) {
682
+ // restore the value on the last restore()
683
+ *var = std::move(it->second.prior_value);
684
+ this->erase(it);
685
+ }
686
+ }
687
+ void debug_assert() {
688
+ TORCH_INTERNAL_ASSERT(this->empty(), "missing call to after()");
689
+ }
690
+ };
691
+
692
+ AutogradCompilerCall& compiler;
693
+ TraceState& state;
694
+ // This is a borrowed reference; we do not increment or decrement its refcount;
695
+ // its lifetime strictly outlives this object's.
696
+ PyObject* py_compiler;
697
+ const NodeCall& curr_node_call;
698
+
699
+ // These mappings are used to save the prior values when we overwrite things
700
+ // in before(). In after(), we use these to cleanup after ourselves.
701
+ StashedVars<SavedVariable> stashed_variables;
702
+ StashedVars<at::Tensor> stashed_tensors;
703
+ StashedVars<c10::SymInt> stashed_symints;
704
+ };
705
+
706
+ } // namespace torch::dynamo::autograd
707
+
708
+ template <>
709
+ struct std::hash<torch::dynamo::autograd::CacheKey> {
710
+ size_t operator()(const torch::dynamo::autograd::CacheKey& k) const {
711
+ return k.hash();
712
+ }
713
+ };
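
One detail in the header above that is easy to miss is the variable-length size encoding used by `collect_size`: sizes below 253 are stored in a single byte, while larger sizes are written as a marker byte (253, 254, or 255) followed by a uint16, uint32, or uint64 payload. The following is a minimal standalone sketch of that scheme, assuming an illustrative `encode_size` helper that writes into a plain byte vector rather than the class's internal specialization-key buffer:

#include <cstdint>
#include <limits>
#include <vector>

// Illustrative re-implementation of the collect_size() byte layout:
// values below 253 take one byte; larger values get a marker byte
// (253 -> uint16 payload, 254 -> uint32, 255 -> uint64).
static void encode_size(std::vector<uint8_t>& out, uint64_t s) {
  auto put = [&](const auto& v) {
    const auto* p = reinterpret_cast<const uint8_t*>(&v);
    out.insert(out.end(), p, p + sizeof(v));
  };
  if (s < 253) {
    out.push_back(static_cast<uint8_t>(s));               // 1 byte
  } else if (s <= std::numeric_limits<uint16_t>::max()) {
    out.push_back(253);
    put(static_cast<uint16_t>(s));                         // 3 bytes total
  } else if (s <= std::numeric_limits<uint32_t>::max()) {
    out.push_back(254);
    put(static_cast<uint32_t>(s));                         // 5 bytes total
  } else {
    out.push_back(255);
    put(s);                                                // 9 bytes total
  }
}

// For example, encode_size(buf, 7) appends {0x07}, while encode_size(buf, 300)
// appends {0xFD, 0x2C, 0x01} on a little-endian machine.
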
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpp_shim.h ADDED
@@ -0,0 +1,15 @@
1
+ #pragma once
2
+
3
+ #ifdef __cplusplus
4
+ extern "C" {
5
+ #endif
6
+
7
+ struct _PytorchRecordFunctionState;
8
+ typedef struct _PytorchRecordFunctionState _PytorchRecordFunctionState;
9
+
10
+ _PytorchRecordFunctionState* _pytorch_record_function_enter(const char* name);
11
+ void _pytorch_record_function_exit(_PytorchRecordFunctionState* state);
12
+
13
+ #ifdef __cplusplus
14
+ } // extern "C"
15
+ #endif
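
The shim above deliberately exposes a C-style enter/exit pair rather than a C++ type, so a C++ caller typically wants to guarantee the exit call itself. A minimal sketch of such a wrapper, assuming the illustrative name `RecordFunctionGuard` (not part of the header):

#include <torch/csrc/dynamo/cpp_shim.h>

// Illustrative RAII wrapper: closes the record-function scope even if the
// guarded code throws.
struct RecordFunctionGuard {
  explicit RecordFunctionGuard(const char* name)
      : state_(_pytorch_record_function_enter(name)) {}
  ~RecordFunctionGuard() {
    _pytorch_record_function_exit(state_);
  }
  RecordFunctionGuard(const RecordFunctionGuard&) = delete;
  RecordFunctionGuard& operator=(const RecordFunctionGuard&) = delete;

 private:
  _PytorchRecordFunctionState* state_;
};

// Usage: RecordFunctionGuard g("my_region"); ...work to be profiled...
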
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/utils/python_compat.h>
4
+
5
+ // Functions that need to be copied from the CPython source
6
+ // should go in cpython_defs.c. Copying is required when, e.g.,
7
+ // we need to call internal CPython functions that are not exposed.
8
+
9
+ #if IS_PYTHON_3_11_PLUS
10
+
11
+ #include <internal/pycore_frame.h>
12
+
13
+ int THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame* frame);
14
+
15
+ PyFunctionObject* _PyFunction_CopyWithNewCode(
16
+ PyFunctionObject* o,
17
+ PyCodeObject* code);
18
+
19
+ void THP_PyFrame_Clear(_PyInterpreterFrame* frame);
20
+
21
+ #endif
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/debug_macros.h ADDED
@@ -0,0 +1,46 @@
1
+ #pragma once
2
+
3
+ #include <stdio.h>
4
+
5
+ #ifdef _WIN32
6
+ #define unlikely(x) (x)
7
+ #else
8
+ #define unlikely(x) __builtin_expect((x), 0)
9
+ #endif
10
+
11
+ #define NULL_CHECK(val) \
12
+ if (unlikely((val) == NULL)) { \
13
+ fprintf(stderr, "NULL ERROR: %s:%d\n", __FILE__, __LINE__); \
14
+ PyErr_Print(); \
15
+ abort(); \
16
+ } else { \
17
+ }
18
+
19
+ // CHECK might be previously declared
20
+ #undef CHECK
21
+ #define CHECK(cond) \
22
+ if (unlikely(!(cond))) { \
23
+ fprintf(stderr, "DEBUG CHECK FAILED: %s:%d\n", __FILE__, __LINE__); \
24
+ abort(); \
25
+ } else { \
26
+ }
27
+
28
+ // Uncomment next line to print debug message
29
+ // #define TORCHDYNAMO_DEBUG 1
30
+ #ifdef TORCHDYNAMO_DEBUG
31
+
32
+ #define DEBUG_CHECK(cond) CHECK(cond)
33
+ #define DEBUG_NULL_CHECK(val) NULL_CHECK(val)
34
+ #define DEBUG_TRACE(msg, ...) \
35
+ fprintf(stderr, "TRACE[%s:%d] " msg "\n", __func__, __LINE__, __VA_ARGS__)
36
+ #define DEBUG_TRACE0(msg) \
37
+ fprintf(stderr, "TRACE[%s:%d] " msg "\n", __func__, __LINE__)
38
+
39
+ #else
40
+
41
+ #define DEBUG_CHECK(cond)
42
+ #define DEBUG_NULL_CHECK(val)
43
+ #define DEBUG_TRACE(msg, ...)
44
+ #define DEBUG_TRACE0(msg)
45
+
46
+ #endif
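
To make the intended usage concrete, here is a short hypothetical call site (the function and variable names are invented for the example); with TORCHDYNAMO_DEBUG left undefined, the DEBUG_* variants compile away to nothing:

#include <Python.h>

// Hypothetical helper showing how the macros compose.
static PyObject* example_lookup(PyObject* dict, PyObject* key) {
  NULL_CHECK(dict);           // always active: prints location and aborts on NULL
  DEBUG_NULL_CHECK(key);      // only checked when TORCHDYNAMO_DEBUG is defined
  PyObject* value = PyDict_GetItem(dict, key);  // borrowed reference, may be NULL
  DEBUG_TRACE("lookup hit=%d", value != NULL);
  return value;
}
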
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h ADDED
@@ -0,0 +1,6 @@
1
+ #pragma once
2
+ #include <Python.h>
3
+
4
+ extern "C" {
5
+ PyObject* torch_c_dynamo_eval_frame_init(void);
6
+ }
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/extra_state.h ADDED
@@ -0,0 +1,146 @@
1
+ #pragma once
2
+
3
+ #include <Python.h>
4
+
5
+ #ifdef __cplusplus
6
+
7
+ #include <torch/csrc/dynamo/utils.h>
8
+ #include <torch/csrc/utils/pybind.h>
9
+ #include <list>
10
+
11
+ namespace py = pybind11;
12
+
13
+ extern "C" {
14
+
15
+ #endif
16
+
17
+ // Flag to just run a frame normally
18
+ #define SKIP_CODE ((void*)0x1)
19
+
20
+ // Points to the extra scratch space on the code object
21
+ extern Py_ssize_t extra_index;
22
+
23
+ // function to call when cache lookup errors
24
+ extern PyObject* guard_error_hook;
25
+
26
+ typedef PyObject FrameState;
27
+ typedef struct CacheEntry CacheEntry;
28
+
29
+ // ExtraState encapsulates CacheEntry and FrameState. ExtraState is the highest
30
+ // level of abstraction of what is stored in the code object's extra space. Previously,
31
+ // we saved different parts on different extra indexes. We prefer this way
32
+ // because of cleaner abstraction and faster SetExtra access.
33
+
34
+ #ifdef __cplusplus
35
+
36
+ typedef struct VISIBILITY_HIDDEN ExtraState {
37
+ // List of cache entries for compiled code objects
38
+ std::list<CacheEntry> cache_entry_list;
39
+ // Frame state to detect dynamic shape dims
40
+ py::dict frame_state;
41
+
42
+ CacheEntry* get_first_entry();
43
+ void move_to_front(CacheEntry* cache_entry);
44
+ void invalidate(CacheEntry* cache_entry);
45
+ } ExtraState;
46
+
47
+ #else
48
+
49
+ typedef struct ExtraState ExtraState;
50
+
51
+ #endif
52
+
53
+ // Helper to extract the cache_entry from the extra state.
54
+ // Ownership contract
55
+ // args
56
+ // - extra_state: Borrowed
57
+ // return
58
+ // - CacheEntry: Borrowed.
59
+ CacheEntry* extract_cache_entry(ExtraState* extra_state);
60
+
61
+ // Returns either the previously stored frame state or an empty dict.
62
+ // Ownership contract
63
+ // args
64
+ // - extra_state: Borrowed
65
+ // return
66
+ // - extra_state->frame_state: Borrowed.
67
+ FrameState* extract_frame_state(ExtraState* extra_state);
68
+
69
+ // Ownership contract
70
+ // args
71
+ // - code: Borrowed
72
+ // return
73
+ // - extra_state: Borrowed.
74
+ ExtraState* get_extra_state(PyCodeObject* code);
75
+
76
+ // This is passed as freefunc to _PyEval_RequestCodeExtraIndex. This acts as a
77
+ // deleter for the object on extra scratch space. This function is called
78
+ // internally in _PyCode_SetExtra and also during the code deallocation.
79
+
80
+ // Destroys the extra state by deleting cache_entry, frame state and finally
81
+ // freeing the constructed extra state.
82
+
83
+ // Developer note - You should not call this function directly. This is called
84
+ // directly inside set_extra_state. If you are in a situation trying to call
85
+ // this function, consider if set_extra_state should be called.
86
+ void destroy_extra_state(void* obj);
87
+
88
+ // Clears the existing object sitting on the extra scratch space and sets it
89
+ // up with the new state. Note that _PyCode_SetExtra calls the
90
+ // destroy_extra_state deleter internally, and therefore we don't call it
91
+ // explicitly here.
92
+
93
+ // Ownership contract
94
+ // args
95
+ // - extra_state: Stolen
96
+ // return
97
+ // - there is no return, but the extra_state is stolen, so it becomes
98
+ // set_extra_state's responsibility to clean it up. It will be deleted during
99
+ // the reset_code/skip, when the set_extra_state is called with
100
+ // NULL/SKIP_CODE.
101
+
102
+ // Invariant - Don't set the extra state for the extra state that is already on
103
+ // the code object. Otherwise, we will first free up the old extra state
104
+ // (which is also the new extra state) and write something invalid on the
105
+ // scratch space.
106
+ void set_extra_state(PyCodeObject* code, ExtraState* extra_state);
107
+
108
+ // Creates a new extra state and puts it on the extra scratch space of the code
109
+ // object.
110
+
111
+ // Ownership contract
112
+ // args
113
+ // - code: Borrowed
114
+ // return:
115
+ // - extra_state: New reference.
116
+ // These references are then further passed to set_extra_state which becomes
117
+ // the final owner of these references.
118
+ ExtraState* init_and_set_extra_state(PyCodeObject* code);
119
+
120
+ // Lookup the cache held by extra_state.
121
+ // Ownership contract
122
+ // args
123
+ // - extra_state: Borrowed
124
+ // - f_locals: Borrowed
125
+ // return:
126
+ // - Py_None or PyCodeObject: Borrowed reference.
127
+ PyObject* lookup(ExtraState* extra_state, PyObject* f_locals);
128
+
129
+ // Create a new cache entry at extra_state holding on to guarded_code.
130
+ // Ownership contract
131
+ // args
132
+ // - extra_state: Borrowed
133
+ // - guarded_code: Borrowed
134
+ // return:
135
+ // - cache_entry: Borrowed reference
136
+ CacheEntry* create_cache_entry(ExtraState* extra_state, PyObject* guarded_code);
137
+
138
+ #ifdef __cplusplus
139
+
140
+ } // extern "C"
141
+
142
+ // Returns the list of CacheEntry corresponding to code_obj.
143
+ // Warning: returns references whose lifetimes are controlled by C++
144
+ py::list _debug_get_cache_entry_list(const py::handle& code_obj);
145
+
146
+ #endif
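
Pieced together from the ownership comments above, a hedged sketch of how a caller might drive this API when evaluating a frame (the surrounding function, its return convention, and the control flow are assumptions for illustration; the real logic lives in dynamo's eval-frame code):

#include <torch/csrc/dynamo/extra_state.h>

// Illustrative-only driver.
static PyObject* maybe_lookup_compiled(PyCodeObject* code, PyObject* f_locals) {
  ExtraState* extra = get_extra_state(code);  // borrowed
  if (extra == (ExtraState*)SKIP_CODE) {
    return NULL;  // this code object was marked "run normally"
  }
  if (extra == NULL) {
    extra = init_and_set_extra_state(code);   // ownership passes to the code object
  }
  // Returns Py_None (no cache entry passed its guards) or a cached
  // PyCodeObject; both are borrowed references.
  return lookup(extra, f_locals);
}
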
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h ADDED
@@ -0,0 +1,4 @@
1
+ #pragma once
2
+ #include <torch/csrc/python_headers.h>
3
+
4
+ PyObject* torch_c_dynamo_guards_init();
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h ADDED
@@ -0,0 +1,13 @@
1
+ #pragma once
2
+
3
+ // C2039 MSVC
4
+ #include <pybind11/complex.h>
5
+ #include <torch/csrc/utils/pybind.h>
6
+
7
+ #include <Python.h>
8
+
9
+ namespace torch {
10
+ namespace dynamo {
11
+ void initDynamoBindings(PyObject* torch);
12
+ }
13
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+ #include <torch/csrc/utils/python_stub.h>
3
+
4
+ // see [Note: Compiled Autograd]
5
+ namespace torch::dynamo::autograd {
6
+ PyObject* torch_c_dynamo_compiled_autograd_init();
7
+ } // namespace torch::dynamo::autograd
venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/utils.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ // The visibility attribute is to avoid a warning about storing a field in the
4
+ // struct that has a different visibility (from pybind) than the struct.
5
+ #ifdef _WIN32
6
+ #define VISIBILITY_HIDDEN
7
+ #else
8
+ #define VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
9
+ #endif
venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/byte_order.h ADDED
@@ -0,0 +1,227 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/Float8_e4m3fn.h>
5
+ #include <c10/util/Float8_e4m3fnuz.h>
6
+ #include <c10/util/Float8_e5m2.h>
7
+ #include <c10/util/Float8_e5m2fnuz.h>
8
+ #include <c10/util/Half.h>
9
+ #include <torch/csrc/Export.h>
10
+ #include <cstddef>
11
+ #include <cstdint>
12
+
13
+ #ifdef __FreeBSD__
14
+ #include <sys/endian.h>
15
+ #include <sys/types.h>
16
+ #define thp_bswap16(x) bswap16(x)
17
+ #define thp_bswap32(x) bswap32(x)
18
+ #define thp_bswap64(x) bswap64(x)
19
+ #elif defined(__APPLE__)
20
+ #include <libkern/OSByteOrder.h>
21
+ #define thp_bswap16(x) OSSwapInt16(x)
22
+ #define thp_bswap32(x) OSSwapInt32(x)
23
+ #define thp_bswap64(x) OSSwapInt64(x)
24
+ #elif defined(__GNUC__) && !defined(__MINGW32__)
25
+ #include <byteswap.h>
26
+ #define thp_bswap16(x) bswap_16(x)
27
+ #define thp_bswap32(x) bswap_32(x)
28
+ #define thp_bswap64(x) bswap_64(x)
29
+ #elif defined _WIN32 || defined _WIN64
30
+ #define thp_bswap16(x) _byteswap_ushort(x)
31
+ #define thp_bswap32(x) _byteswap_ulong(x)
32
+ #define thp_bswap64(x) _byteswap_uint64(x)
33
+ #endif
34
+
35
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
36
+ #define to_be16(x) thp_bswap16(x)
37
+ #define from_be16(x) thp_bswap16(x)
38
+ #define to_be32(x) thp_bswap32(x)
39
+ #define from_be32(x) thp_bswap32(x)
40
+ #define to_be64(x) thp_bswap64(x)
41
+ #define from_be64(x) thp_bswap64(x)
42
+ #define to_le16(x) (x)
43
+ #define from_le16(x) (x)
44
+ #define to_le32(x) (x)
45
+ #define from_le32(x) (x)
46
+ #define to_le64(x) (x)
47
+ #define from_le64(x) (x)
48
+ #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
49
+ #define to_be16(x) (x)
50
+ #define from_be16(x) (x)
51
+ #define to_be32(x) (x)
52
+ #define from_be32(x) (x)
53
+ #define to_be64(x) (x)
54
+ #define from_be64(x) (x)
55
+ #define to_le16(x) thp_bswap16(x)
56
+ #define from_le16(x) thp_bswap16(x)
57
+ #define to_le32(x) thp_bswap32(x)
58
+ #define from_le32(x) thp_bswap32(x)
59
+ #define to_le64(x) thp_bswap64(x)
60
+ #define from_le64(x) thp_bswap64(x)
61
+ #else
62
+ #error Unexpected or undefined __BYTE_ORDER__
63
+ #endif
64
+
65
+ namespace torch {
66
+ namespace utils {
67
+
68
+ enum THPByteOrder { THP_LITTLE_ENDIAN = 0, THP_BIG_ENDIAN = 1 };
69
+
70
+ TORCH_API THPByteOrder THP_nativeByteOrder();
71
+
72
+ TORCH_API void THP_decodeInt16Buffer(
73
+ int16_t* dst,
74
+ const uint8_t* src,
75
+ bool do_byte_swap,
76
+ size_t len);
77
+ TORCH_API void THP_decodeInt32Buffer(
78
+ int32_t* dst,
79
+ const uint8_t* src,
80
+ bool do_byte_swap,
81
+ size_t len);
82
+ TORCH_API void THP_decodeInt64Buffer(
83
+ int64_t* dst,
84
+ const uint8_t* src,
85
+ bool do_byte_swap,
86
+ size_t len);
87
+ TORCH_API void THP_decodeHalfBuffer(
88
+ c10::Half* dst,
89
+ const uint8_t* src,
90
+ bool do_byte_swap,
91
+ size_t len);
92
+ TORCH_API void THP_decodeFloatBuffer(
93
+ float* dst,
94
+ const uint8_t* src,
95
+ bool do_byte_swap,
96
+ size_t len);
97
+ TORCH_API void THP_decodeDoubleBuffer(
98
+ double* dst,
99
+ const uint8_t* src,
100
+ bool do_byte_swap,
101
+ size_t len);
102
+ TORCH_API void THP_decodeBoolBuffer(
103
+ bool* dst,
104
+ const uint8_t* src,
105
+ bool do_byte_swap,
106
+ size_t len);
107
+ TORCH_API void THP_decodeBFloat16Buffer(
108
+ at::BFloat16* dst,
109
+ const uint8_t* src,
110
+ bool do_byte_swap,
111
+ size_t len);
112
+ TORCH_API void THP_decodeComplexFloatBuffer(
113
+ c10::complex<float>* dst,
114
+ const uint8_t* src,
115
+ bool do_byte_swap,
116
+ size_t len);
117
+ TORCH_API void THP_decodeComplexDoubleBuffer(
118
+ c10::complex<double>* dst,
119
+ const uint8_t* src,
120
+ bool do_byte_swap,
121
+ size_t len);
122
+
123
+ TORCH_API void THP_decodeInt16Buffer(
124
+ int16_t* dst,
125
+ const uint8_t* src,
126
+ THPByteOrder order,
127
+ size_t len);
128
+ TORCH_API void THP_decodeInt32Buffer(
129
+ int32_t* dst,
130
+ const uint8_t* src,
131
+ THPByteOrder order,
132
+ size_t len);
133
+ TORCH_API void THP_decodeInt64Buffer(
134
+ int64_t* dst,
135
+ const uint8_t* src,
136
+ THPByteOrder order,
137
+ size_t len);
138
+ TORCH_API void THP_decodeHalfBuffer(
139
+ c10::Half* dst,
140
+ const uint8_t* src,
141
+ THPByteOrder order,
142
+ size_t len);
143
+ TORCH_API void THP_decodeFloatBuffer(
144
+ float* dst,
145
+ const uint8_t* src,
146
+ THPByteOrder order,
147
+ size_t len);
148
+ TORCH_API void THP_decodeDoubleBuffer(
149
+ double* dst,
150
+ const uint8_t* src,
151
+ THPByteOrder order,
152
+ size_t len);
153
+ TORCH_API void THP_decodeBoolBuffer(
154
+ bool* dst,
155
+ const uint8_t* src,
156
+ THPByteOrder order,
157
+ size_t len);
158
+ TORCH_API void THP_decodeBFloat16Buffer(
159
+ at::BFloat16* dst,
160
+ const uint8_t* src,
161
+ THPByteOrder order,
162
+ size_t len);
163
+ TORCH_API void THP_decodeFloat8_e5m2Buffer(
164
+ at::Float8_e5m2* dst,
165
+ const uint8_t* src,
166
+ size_t len);
167
+ TORCH_API void THP_decodeFloat8_e4m3fnBuffer(
168
+ at::Float8_e4m3fn* dst,
169
+ const uint8_t* src,
170
+ size_t len);
171
+ TORCH_API void THP_decodeFloat8_e5m2fnuzBuffer(
172
+ at::Float8_e5m2fnuz* dst,
173
+ const uint8_t* src,
174
+ size_t len);
175
+ TORCH_API void THP_decodeFloat8_e4m3fnuzBuffer(
176
+ at::Float8_e4m3fnuz* dst,
177
+ const uint8_t* src,
178
+ size_t len);
179
+ TORCH_API void THP_decodeComplexFloatBuffer(
180
+ c10::complex<float>* dst,
181
+ const uint8_t* src,
182
+ THPByteOrder order,
183
+ size_t len);
184
+ TORCH_API void THP_decodeComplexDoubleBuffer(
185
+ c10::complex<double>* dst,
186
+ const uint8_t* src,
187
+ THPByteOrder order,
188
+ size_t len);
189
+
190
+ TORCH_API void THP_encodeInt16Buffer(
191
+ uint8_t* dst,
192
+ const int16_t* src,
193
+ THPByteOrder order,
194
+ size_t len);
195
+ TORCH_API void THP_encodeInt32Buffer(
196
+ uint8_t* dst,
197
+ const int32_t* src,
198
+ THPByteOrder order,
199
+ size_t len);
200
+ TORCH_API void THP_encodeInt64Buffer(
201
+ uint8_t* dst,
202
+ const int64_t* src,
203
+ THPByteOrder order,
204
+ size_t len);
205
+ TORCH_API void THP_encodeFloatBuffer(
206
+ uint8_t* dst,
207
+ const float* src,
208
+ THPByteOrder order,
209
+ size_t len);
210
+ TORCH_API void THP_encodeDoubleBuffer(
211
+ uint8_t* dst,
212
+ const double* src,
213
+ THPByteOrder order,
214
+ size_t len);
215
+ TORCH_API void THP_encodeComplexFloatBuffer(
216
+ uint8_t* dst,
217
+ const c10::complex<float>* src,
218
+ THPByteOrder order,
219
+ size_t len);
220
+ TORCH_API void THP_encodeComplexDoubleBuffer(
221
+ uint8_t* dst,
222
+ const c10::complex<double>* src,
223
+ THPByteOrder order,
224
+ size_t len);
225
+
226
+ } // namespace utils
227
+ } // namespace torch
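
As a small usage example of the decode helpers above (hedged: this assumes `len` counts elements rather than bytes, and the buffer contents are made up), reading two little-endian int32 values from a raw byte stream looks like:

#include <torch/csrc/utils/byte_order.h>
#include <cstdint>

void decode_example() {
  const uint8_t raw[] = {0x01, 0x00, 0x00, 0x00,   // 1
                         0x2c, 0x01, 0x00, 0x00};  // 300
  int32_t out[2];
  // The helper byte-swaps whenever the requested order differs from
  // THP_nativeByteOrder().
  torch::utils::THP_decodeInt32Buffer(
      out, raw, torch::utils::THP_LITTLE_ENDIAN, /*len=*/2);
}
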
venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h ADDED
@@ -0,0 +1,8 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+
5
+ namespace torch {
6
+ TORCH_API bool get_cpp_stacktraces_enabled();
7
+ TORCH_API bool get_disable_addr2line();
8
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h ADDED
@@ -0,0 +1,15 @@
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace utils {
5
+
6
+ static inline bool cuda_enabled() {
7
+ #ifdef USE_CUDA
8
+ return true;
9
+ #else
10
+ return false;
11
+ #endif
12
+ }
13
+
14
+ } // namespace utils
15
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/device_lazy_init.h ADDED
@@ -0,0 +1,48 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/TensorOptions.h>
4
+
5
+ // device_lazy_init() is always compiled, even for CPU-only builds.
6
+
7
+ namespace torch::utils {
8
+
9
+ /**
10
+ * This mechanism of lazy initialization is designed for each device backend.
11
+ * Currently, CUDA and XPU follow this design. This function `device_lazy_init`
12
+ * MUST be called before you attempt to access any Type (CUDA or XPU) object
13
+ * from ATen, in any way. It guarantees that the device runtime status is lazily
14
+ * initialized when the first runtime API is requested.
15
+ *
16
+ * Here are some common ways that a device object may be retrieved:
17
+ * - You call getNonVariableType or getNonVariableTypeOpt
18
+ * - You call toBackend() on a Type
19
+ *
20
+ * It's important to do this correctly, because if you forget to add it you'll
21
+ * get an oblique error message like "Cannot initialize CUDA without
22
+ * ATen_cuda library" or "Cannot initialize XPU without ATen_xpu library" if you
23
+ * try to use CUDA or XPU functionality from a CPU-only build, which is not good
24
+ * UX.
25
+ */
26
+ void device_lazy_init(at::DeviceType device_type);
27
+ void set_requires_device_init(at::DeviceType device_type, bool value);
28
+
29
+ static inline void maybe_initialize_device(at::Device& device) {
30
+ // Add more devices here to enable lazy initialization.
31
+ if (device.is_cuda() || device.is_xpu()) {
32
+ device_lazy_init(device.type());
33
+ }
34
+ }
35
+
36
+ static inline void maybe_initialize_device(c10::optional<at::Device>& device) {
37
+ if (!device.has_value()) {
38
+ return;
39
+ }
40
+ maybe_initialize_device(device.value());
41
+ }
42
+
43
+ static inline void maybe_initialize_device(const at::TensorOptions& options) {
44
+ auto device = options.device();
45
+ maybe_initialize_device(device);
46
+ }
47
+
48
+ } // namespace torch::utils
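
A brief sketch of the call pattern the comment above describes: make sure the backend runtime is (lazily) initialized before touching device state. The surrounding function is hypothetical:

#include <torch/csrc/utils/device_lazy_init.h>

// Hypothetical entry point receiving a user-supplied device.
void prepare_device(at::Device device) {
  // No-op for CPU; triggers device_lazy_init() for CUDA or XPU devices.
  torch::utils::maybe_initialize_device(device);
  // ...backend-specific APIs for `device` are safe to use from here on...
}
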
venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/disable_torch_function.h ADDED
@@ -0,0 +1,42 @@
1
+ #pragma once
2
+ #include <c10/core/DispatchKey.h>
3
+ #include <c10/core/impl/LocalDispatchKeySet.h>
4
+ #include <torch/csrc/python_headers.h>
5
+
6
+ namespace torch {
7
+ // Sometimes we don't want infinite recursion for subclasses,
8
+ // or we want a way to get back the old behaviour.
9
+
10
+ // This is an internal utility, not exposed to users.
11
+ bool torch_function_enabled();
12
+ PyObject* disabled_torch_function_impl();
13
+ PyObject* disabled_torch_dispatch_impl();
14
+ void set_disabled_torch_function_impl(PyObject* value);
15
+ void set_disabled_torch_dispatch_impl(PyObject* value);
16
+ // Set ignore_mode to true if you're trying to collect overloaded arguments;
17
+ // using mode here will improperly cause you to add ALL objects to the
18
+ // overloaded list even if they don't actually have __torch_function__
19
+ bool check_has_torch_function(PyObject* obj, bool ignore_mode = false);
20
+
21
+ struct DisableTorchDispatch {
22
+ DisableTorchDispatch()
23
+ : guard_(c10::DispatchKeySet(
24
+ {c10::DispatchKey::Python, c10::DispatchKey::PreDispatch})),
25
+ guard_tls_snapshot_(c10::DispatchKey::PythonTLSSnapshot) {}
26
+ c10::impl::ExcludeDispatchKeyGuard guard_;
27
+ c10::impl::ExcludeDispatchKeyGuard guard_tls_snapshot_;
28
+ };
29
+
30
+ } // namespace torch
31
+
32
+ PyObject* THPModule_isEnabledTorchFunction(PyObject* self, PyObject* unused);
33
+ PyObject* THPModule_DisableTorchFunctionType();
34
+ PyObject* THPModule_DisableTorchFunctionSubclassType();
35
+ PyObject* THPModule_disable_torch_function(PyObject* self, PyObject* args);
36
+ PyObject* THPModule_disable_torch_dispatch(PyObject* self, PyObject* args);
37
+ PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg);
38
+ PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj);
39
+ PyObject* THPModule_has_torch_function_variadic(
40
+ PyObject*,
41
+ PyObject* const* args,
42
+ Py_ssize_t nargs);