Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/all.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/arg.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h +5 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/cuda.h +30 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h +14 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h +212 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h +182 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h +389 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/imethod.h +53 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/jit.h +36 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h +1065 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h +44 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nested.h +95 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn.h +10 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h +98 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h +17 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h +966 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h +83 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h +88 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h +234 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h +211 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h +102 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h +63 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h +37 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h +1044 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h +211 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h +47 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h +1098 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h +124 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h +124 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h +702 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h +36 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h +18 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h +714 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h +41 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h +95 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/conv.h +415 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h +71 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h +130 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h +242 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h +99 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h +89 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h +95 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h +802 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h +192 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h +219 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h +65 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h +573 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h +236 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h +64 -0
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/all.h
ADDED
@@ -0,0 +1,23 @@
#pragma once

#if !defined(_MSC_VER) && __cplusplus < 201703L
#error C++17 or later compatible compiler is required to use PyTorch.
#endif

#include <torch/autograd.h>
#include <torch/cuda.h>
#include <torch/data.h>
#include <torch/enum.h>
#include <torch/fft.h>
#include <torch/jit.h>
#include <torch/linalg.h>
#include <torch/mps.h>
#include <torch/nested.h>
#include <torch/nn.h>
#include <torch/optim.h>
#include <torch/serialize.h>
#include <torch/sparse.h>
#include <torch/special.h>
#include <torch/types.h>
#include <torch/utils.h>
#include <torch/version.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/arg.h
ADDED
@@ -0,0 +1,23 @@
#pragma once

#include <utility>

#define TORCH_ARG(T, name)                                              \
 public:                                                                \
  inline auto name(const T& new_##name)->decltype(*this) { /* NOLINT */ \
    this->name##_ = new_##name;                                         \
    return *this;                                                       \
  }                                                                     \
  inline auto name(T&& new_##name)->decltype(*this) { /* NOLINT */      \
    this->name##_ = std::move(new_##name);                              \
    return *this;                                                       \
  }                                                                     \
  inline const T& name() const noexcept { /* NOLINT */                  \
    return this->name##_;                                               \
  }                                                                     \
  inline T& name() noexcept { /* NOLINT */                              \
    return this->name##_;                                               \
  }                                                                     \
                                                                        \
 private:                                                               \
  T name##_ /* NOLINT */
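The TORCH_ARG macro above generates a fluent setter pair, const and non-const getters, and a private backing member. A minimal sketch of how an options struct typically uses it; the struct and field names here are illustrative, not part of this diff:

#include <torch/arg.h>
#include <cstdint>

// Hypothetical options struct; only the TORCH_ARG macro comes from arg.h.
struct ConvLikeOptions {
  // Expands to setters returning *this, getters, and a private kernel_size_.
  TORCH_ARG(int64_t, kernel_size) = 3;
  TORCH_ARG(bool, bias) = true;
};

// Usage: chained setters, then read back via the getter.
//   auto opts = ConvLikeOptions().kernel_size(5).bias(false);
//   int64_t k = opts.kernel_size();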
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h
ADDED
@@ -0,0 +1,5 @@
#pragma once

#include <torch/csrc/autograd/autograd.h>
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
#include <torch/csrc/autograd/custom_function.h>
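autograd.h pulls in the custom-function machinery. A minimal sketch of a custom autograd op built on torch::autograd::Function; the MySquare name is illustrative and not part of this diff:

#include <torch/autograd.h>

// Illustrative custom op: y = x * x with a hand-written backward.
struct MySquare : public torch::autograd::Function<MySquare> {
  static torch::Tensor forward(
      torch::autograd::AutogradContext* ctx, torch::Tensor x) {
    ctx->save_for_backward({x});
    return x * x;
  }

  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext* ctx,
      torch::autograd::variable_list grad_output) {
    auto x = ctx->get_saved_variables()[0];
    return {grad_output[0] * 2 * x};  // dy/dx = 2x
  }
};

// Called as: torch::Tensor y = MySquare::apply(x);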
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/cuda.h
ADDED
@@ -0,0 +1,30 @@
#pragma once

#include <torch/csrc/Export.h>

#include <cstddef>
#include <cstdint>

namespace torch {
namespace cuda {

/// Returns the number of CUDA devices available.
size_t TORCH_API device_count();

/// Returns true if at least one CUDA device is available.
bool TORCH_API is_available();

/// Returns true if CUDA is available, and CuDNN is available.
bool TORCH_API cudnn_is_available();

/// Sets the seed for the current GPU.
void TORCH_API manual_seed(uint64_t seed);

/// Sets the seed for all available GPUs.
void TORCH_API manual_seed_all(uint64_t seed);

/// Waits for all kernels in all streams on a CUDA device to complete.
void TORCH_API synchronize(int64_t device_index = -1);

} // namespace cuda
} // namespace torch
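A short sketch of how these declarations are used from the C++ frontend; runtime behavior depends on whether the build has CUDA and a device is visible:

#include <torch/cuda.h>
#include <cstdio>

int main() {
  // Query device availability before doing any GPU work.
  if (torch::cuda::is_available()) {
    std::printf("CUDA devices: %zu, cuDNN available: %d\n",
                torch::cuda::device_count(),
                static_cast<int>(torch::cuda::cudnn_is_available()));
    torch::cuda::manual_seed_all(42);  // seed every visible GPU
    torch::cuda::synchronize();        // wait for outstanding kernels
  }
  return 0;
}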
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h
ADDED
@@ -0,0 +1,14 @@
#pragma once

#include <torch/data/dataloader.h>
#include <torch/data/datasets.h>
#include <torch/data/samplers.h>
#include <torch/data/transforms.h>

// Some "exports".
namespace torch {
namespace data {
using datasets::BatchDataset;
using datasets::Dataset;
} // namespace data
} // namespace torch
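data.h re-exports the dataset base classes used by the C++ data-loading API. A minimal sketch of a custom in-memory dataset built on torch::data::Dataset; the TensorPairDataset name is illustrative, not part of this diff:

#include <torch/data.h>
#include <torch/types.h>

// Illustrative dataset returning (input, target) examples by index.
class TensorPairDataset
    : public torch::data::Dataset<TensorPairDataset> {
 public:
  TensorPairDataset(torch::Tensor inputs, torch::Tensor targets)
      : inputs_(std::move(inputs)), targets_(std::move(targets)) {}

  // One example per index.
  torch::data::Example<> get(size_t index) override {
    return {inputs_[index], targets_[index]};
  }

  // The size is optional<size_t> in the C++ frontend.
  torch::optional<size_t> size() const override {
    return inputs_.size(0);
  }

 private:
  torch::Tensor inputs_, targets_;
};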
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h
ADDED
@@ -0,0 +1,212 @@
#pragma once

#include <string>
#include <variant>

#include <ATen/core/Reduction.h>
#include <c10/util/Exception.h>
#include <torch/csrc/Export.h>

#define TORCH_ENUM_DECLARE(name)                            \
  namespace torch {                                         \
  namespace enumtype {                                      \
  /*                                                        \
    NOTE: We need to provide the default constructor for each struct, \
    otherwise Clang 3.8 would complain:                     \
    ```                                                     \
    error: default initialization of an object of const type 'const \
    enumtype::Enum1' without a user-provided default constructor \
    ```                                                     \
  */                                                        \
  struct k##name {                                          \
    k##name() {}                                            \
  };                                                        \
  }                                                         \
  TORCH_API extern const enumtype::k##name k##name;         \
  }

#define TORCH_ENUM_DEFINE(name)       \
  namespace torch {                   \
  const enumtype::k##name k##name;    \
  }

#define TORCH_ENUM_PRETTY_PRINT(name)                        \
  std::string operator()(const enumtype::k##name& v) const { \
    std::string k("k");                                      \
    return k + #name;                                        \
  }

// NOTE: Backstory on why we need the following two macros:
//
// Consider the following options class:
//
// ```
// struct TORCH_API SomeOptions {
//   typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
//   reduction_t; SomeOptions(reduction_t reduction = torch::kMean) :
//   reduction_(reduction) {}
//
//   TORCH_ARG(reduction_t, reduction);
// };
// ```
//
// and the functional that uses it:
//
// ```
// Tensor some_functional(
//     const Tensor& input,
//     SomeOptions options = {}) {
//   ...
// }
// ```
//
// Normally, we would expect this to work:
//
// `F::some_functional(input, torch::kNone)`
//
// However, it throws the following error instead:
//
// ```
// error: could not convert `torch::kNone` from `const torch::enumtype::kNone`
// to `torch::nn::SomeOptions`
// ```
//
// To get around this problem, we explicitly provide the following constructors
// for `SomeOptions`:
//
// ```
// SomeOptions(torch::enumtype::kNone reduction) : reduction_(torch::kNone) {}
// SomeOptions(torch::enumtype::kMean reduction) : reduction_(torch::kMean) {}
// SomeOptions(torch::enumtype::kSum reduction) : reduction_(torch::kSum) {}
// ```
//
// so that the conversion from `torch::kNone` to `SomeOptions` would work.
//
// Note that we also provide the default constructor `SomeOptions() {}`, so that
// `SomeOptions options = {}` can work.
#define TORCH_OPTIONS_CTOR_VARIANT_ARG3(                                      \
    OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3)                              \
  OPTIONS_NAME() = default;                                                   \
  OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \
  OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \
  OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {}

#define TORCH_OPTIONS_CTOR_VARIANT_ARG4(                                      \
    OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3, TYPE4)                       \
  OPTIONS_NAME() = default;                                                   \
  OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \
  OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \
  OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} \
  OPTIONS_NAME(torch::enumtype::TYPE4 ARG_NAME) : ARG_NAME##_(torch::TYPE4) {}

TORCH_ENUM_DECLARE(Linear)
TORCH_ENUM_DECLARE(Conv1D)
TORCH_ENUM_DECLARE(Conv2D)
TORCH_ENUM_DECLARE(Conv3D)
TORCH_ENUM_DECLARE(ConvTranspose1D)
TORCH_ENUM_DECLARE(ConvTranspose2D)
TORCH_ENUM_DECLARE(ConvTranspose3D)
TORCH_ENUM_DECLARE(Sigmoid)
TORCH_ENUM_DECLARE(Tanh)
TORCH_ENUM_DECLARE(ReLU)
TORCH_ENUM_DECLARE(GELU)
TORCH_ENUM_DECLARE(SiLU)
TORCH_ENUM_DECLARE(Mish)
TORCH_ENUM_DECLARE(LeakyReLU)
TORCH_ENUM_DECLARE(FanIn)
TORCH_ENUM_DECLARE(FanOut)
TORCH_ENUM_DECLARE(Constant)
TORCH_ENUM_DECLARE(Reflect)
TORCH_ENUM_DECLARE(Replicate)
TORCH_ENUM_DECLARE(Circular)
TORCH_ENUM_DECLARE(Nearest)
TORCH_ENUM_DECLARE(Bilinear)
TORCH_ENUM_DECLARE(Bicubic)
TORCH_ENUM_DECLARE(Trilinear)
TORCH_ENUM_DECLARE(Area)
TORCH_ENUM_DECLARE(NearestExact)
TORCH_ENUM_DECLARE(Sum)
TORCH_ENUM_DECLARE(Mean)
TORCH_ENUM_DECLARE(Max)
TORCH_ENUM_DECLARE(None)
TORCH_ENUM_DECLARE(BatchMean)
TORCH_ENUM_DECLARE(Zeros)
TORCH_ENUM_DECLARE(Border)
TORCH_ENUM_DECLARE(Reflection)
TORCH_ENUM_DECLARE(RNN_TANH)
TORCH_ENUM_DECLARE(RNN_RELU)
TORCH_ENUM_DECLARE(LSTM)
TORCH_ENUM_DECLARE(GRU)
TORCH_ENUM_DECLARE(Valid)
TORCH_ENUM_DECLARE(Same)

namespace torch {
namespace enumtype {

struct _compute_enum_name {
  TORCH_ENUM_PRETTY_PRINT(Linear)
  TORCH_ENUM_PRETTY_PRINT(Conv1D)
  TORCH_ENUM_PRETTY_PRINT(Conv2D)
  TORCH_ENUM_PRETTY_PRINT(Conv3D)
  TORCH_ENUM_PRETTY_PRINT(ConvTranspose1D)
  TORCH_ENUM_PRETTY_PRINT(ConvTranspose2D)
  TORCH_ENUM_PRETTY_PRINT(ConvTranspose3D)
  TORCH_ENUM_PRETTY_PRINT(Sigmoid)
  TORCH_ENUM_PRETTY_PRINT(Tanh)
  TORCH_ENUM_PRETTY_PRINT(ReLU)
  TORCH_ENUM_PRETTY_PRINT(GELU)
  TORCH_ENUM_PRETTY_PRINT(SiLU)
  TORCH_ENUM_PRETTY_PRINT(Mish)
  TORCH_ENUM_PRETTY_PRINT(LeakyReLU)
  TORCH_ENUM_PRETTY_PRINT(FanIn)
  TORCH_ENUM_PRETTY_PRINT(FanOut)
  TORCH_ENUM_PRETTY_PRINT(Constant)
  TORCH_ENUM_PRETTY_PRINT(Reflect)
  TORCH_ENUM_PRETTY_PRINT(Replicate)
  TORCH_ENUM_PRETTY_PRINT(Circular)
  TORCH_ENUM_PRETTY_PRINT(Nearest)
  TORCH_ENUM_PRETTY_PRINT(Bilinear)
  TORCH_ENUM_PRETTY_PRINT(Bicubic)
  TORCH_ENUM_PRETTY_PRINT(Trilinear)
  TORCH_ENUM_PRETTY_PRINT(Area)
  TORCH_ENUM_PRETTY_PRINT(NearestExact)
  TORCH_ENUM_PRETTY_PRINT(Sum)
  TORCH_ENUM_PRETTY_PRINT(Mean)
  TORCH_ENUM_PRETTY_PRINT(Max)
  TORCH_ENUM_PRETTY_PRINT(None)
  TORCH_ENUM_PRETTY_PRINT(BatchMean)
  TORCH_ENUM_PRETTY_PRINT(Zeros)
  TORCH_ENUM_PRETTY_PRINT(Border)
  TORCH_ENUM_PRETTY_PRINT(Reflection)
  TORCH_ENUM_PRETTY_PRINT(RNN_TANH)
  TORCH_ENUM_PRETTY_PRINT(RNN_RELU)
  TORCH_ENUM_PRETTY_PRINT(LSTM)
  TORCH_ENUM_PRETTY_PRINT(GRU)
  TORCH_ENUM_PRETTY_PRINT(Valid)
  TORCH_ENUM_PRETTY_PRINT(Same)
};

template <typename V>
std::string get_enum_name(V variant_enum) {
  return std::visit(enumtype::_compute_enum_name{}, variant_enum);
}

template <typename V>
at::Reduction::Reduction reduction_get_enum(V variant_enum) {
  if (std::holds_alternative<enumtype::kNone>(variant_enum)) {
    return at::Reduction::None;
  } else if (std::holds_alternative<enumtype::kMean>(variant_enum)) {
    return at::Reduction::Mean;
  } else if (std::holds_alternative<enumtype::kSum>(variant_enum)) {
    return at::Reduction::Sum;
  } else {
    TORCH_CHECK(
        false,
        get_enum_name(variant_enum),
        " is not a valid value for reduction");
    return at::Reduction::END;
  }
}

} // namespace enumtype
} // namespace torch
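The enum tags and variant helpers above are what options classes use for string-like arguments. A small sketch of consuming them; the SomeReduction alias is illustrative, not part of this diff:

#include <torch/enum.h>
#include <iostream>
#include <variant>

// Illustrative variant over the reduction tags declared above.
using SomeReduction = std::variant<
    torch::enumtype::kNone, torch::enumtype::kMean, torch::enumtype::kSum>;

int main() {
  SomeReduction reduction = torch::kMean;
  // get_enum_name pretty-prints the active tag, e.g. "kMean".
  std::cout << torch::enumtype::get_enum_name(reduction) << '\n';
  // reduction_get_enum maps the tag to the at::Reduction value kernels expect.
  auto r = torch::enumtype::reduction_get_enum(reduction);
  return r == at::Reduction::Mean ? 0 : 1;
}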
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h
ADDED
@@ -0,0 +1,182 @@
#pragma once

#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <c10/util/irange.h>

#include <algorithm>
#include <array>
#include <cstdint>
#include <initializer_list>
#include <string>
#include <vector>

namespace torch {

/// A utility class that accepts either a container of `D`-many values, or a
/// single value, which is internally repeated `D` times. This is useful to
/// represent parameters that are multidimensional, but often equally sized in
/// all dimensions. For example, the kernel size of a 2D convolution has an `x`
/// and `y` length, but `x` and `y` are often equal. In such a case you could
/// just pass `3` to an `ExpandingArray<2>` and it would "expand" to `{3, 3}`.
template <size_t D, typename T = int64_t>
class ExpandingArray {
 public:
  /// Constructs an `ExpandingArray` from an `initializer_list`. The extent of
  /// the length is checked against the `ExpandingArray`'s extent parameter `D`
  /// at runtime.
  /*implicit*/ ExpandingArray(std::initializer_list<T> list)
      : ExpandingArray(at::ArrayRef<T>(list)) {}

  /// Constructs an `ExpandingArray` from an `std::vector`. The extent of
  /// the length is checked against the `ExpandingArray`'s extent parameter `D`
  /// at runtime.
  /*implicit*/ ExpandingArray(std::vector<T> vec)
      : ExpandingArray(at::ArrayRef<T>(vec)) {}

  /// Constructs an `ExpandingArray` from an `at::ArrayRef`. The extent of
  /// the length is checked against the `ExpandingArray`'s extent parameter `D`
  /// at runtime.
  /*implicit*/ ExpandingArray(at::ArrayRef<T> values) {
    // clang-format off
    TORCH_CHECK(
        values.size() == D,
        "Expected ", D, " values, but instead got ", values.size());
    // clang-format on
    std::copy(values.begin(), values.end(), values_.begin());
  }

  /// Constructs an `ExpandingArray` from a single value, which is repeated `D`
  /// times (where `D` is the extent parameter of the `ExpandingArray`).
  /*implicit*/ ExpandingArray(T single_size) {
    values_.fill(single_size);
  }

  /// Constructs an `ExpandingArray` from a correctly sized `std::array`.
  /*implicit*/ ExpandingArray(const std::array<T, D>& values)
      : values_(values) {}

  /// Accesses the underlying `std::array`.
  std::array<T, D>& operator*() {
    return values_;
  }

  /// Accesses the underlying `std::array`.
  const std::array<T, D>& operator*() const {
    return values_;
  }

  /// Accesses the underlying `std::array`.
  std::array<T, D>* operator->() {
    return &values_;
  }

  /// Accesses the underlying `std::array`.
  const std::array<T, D>* operator->() const {
    return &values_;
  }

  /// Returns an `ArrayRef` to the underlying `std::array`.
  operator at::ArrayRef<T>() const {
    return values_;
  }

  /// Returns the extent of the `ExpandingArray`.
  size_t size() const noexcept {
    return D;
  }

 protected:
  /// The backing array.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::array<T, D> values_;
};

template <size_t D, typename T>
std::ostream& operator<<(
    std::ostream& stream,
    const ExpandingArray<D, T>& expanding_array) {
  if (expanding_array.size() == 1) {
    return stream << expanding_array->at(0);
  }
  return stream << static_cast<at::ArrayRef<T>>(expanding_array);
}

/// A utility class that accepts either a container of `D`-many
/// `c10::optional<T>` values, or a single `c10::optional<T>` value, which is
/// internally repeated `D` times. It has the additional ability to accept
/// containers of the underlying type `T` and convert them to a container of
/// `c10::optional<T>`.
template <size_t D, typename T = int64_t>
class ExpandingArrayWithOptionalElem
    : public ExpandingArray<D, c10::optional<T>> {
 public:
  using ExpandingArray<D, c10::optional<T>>::ExpandingArray;

  /// Constructs an `ExpandingArrayWithOptionalElem` from an `initializer_list`
  /// of the underlying type `T`. The extent of the length is checked against
  /// the `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime.
  /*implicit*/ ExpandingArrayWithOptionalElem(std::initializer_list<T> list)
      : ExpandingArrayWithOptionalElem(at::ArrayRef<T>(list)) {}

  /// Constructs an `ExpandingArrayWithOptionalElem` from an `std::vector` of
  /// the underlying type `T`. The extent of the length is checked against the
  /// `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime.
  /*implicit*/ ExpandingArrayWithOptionalElem(std::vector<T> vec)
      : ExpandingArrayWithOptionalElem(at::ArrayRef<T>(vec)) {}

  /// Constructs an `ExpandingArrayWithOptionalElem` from an `at::ArrayRef` of
  /// the underlying type `T`. The extent of the length is checked against the
  /// `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime.
  /*implicit*/ ExpandingArrayWithOptionalElem(at::ArrayRef<T> values)
      : ExpandingArray<D, c10::optional<T>>(0) {
    // clang-format off
    TORCH_CHECK(
        values.size() == D,
        "Expected ", D, " values, but instead got ", values.size());
    // clang-format on
    for (const auto i : c10::irange(this->values_.size())) {
      this->values_[i] = values[i];
    }
  }

  /// Constructs an `ExpandingArrayWithOptionalElem` from a single value of the
  /// underlying type `T`, which is repeated `D` times (where `D` is the extent
  /// parameter of the `ExpandingArrayWithOptionalElem`).
  /*implicit*/ ExpandingArrayWithOptionalElem(T single_size)
      : ExpandingArray<D, c10::optional<T>>(0) {
    for (const auto i : c10::irange(this->values_.size())) {
      this->values_[i] = single_size;
    }
  }

  /// Constructs an `ExpandingArrayWithOptionalElem` from a correctly sized
  /// `std::array` of the underlying type `T`.
  /*implicit*/ ExpandingArrayWithOptionalElem(const std::array<T, D>& values)
      : ExpandingArray<D, c10::optional<T>>(0) {
    for (const auto i : c10::irange(this->values_.size())) {
      this->values_[i] = values[i];
    }
  }
};

template <size_t D, typename T>
std::ostream& operator<<(
    std::ostream& stream,
    const ExpandingArrayWithOptionalElem<D, T>& expanding_array_with_opt_elem) {
  if (expanding_array_with_opt_elem.size() == 1) {
    const auto& elem = expanding_array_with_opt_elem->at(0);
    stream << (elem.has_value() ? c10::str(elem.value()) : "None");
  } else {
    std::vector<std::string> str_array;
    for (const auto& elem : *expanding_array_with_opt_elem) {
      str_array.emplace_back(
          elem.has_value() ? c10::str(elem.value()) : "None");
    }
    stream << at::ArrayRef<std::string>(str_array);
  }
  return stream;
}

} // namespace torch
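ExpandingArray is what lets option fields accept either a single scalar or a per-dimension list. A small sketch of its behavior, grounded in the constructors and accessors declared above:

#include <torch/expanding_array.h>
#include <iostream>

int main() {
  // A single value expands to all D slots...
  torch::ExpandingArray<2> kernel(3);       // becomes {3, 3}
  // ...while a braced list must have exactly D elements (checked at runtime).
  torch::ExpandingArray<2> stride({2, 1});  // stays {2, 1}

  // operator* exposes the underlying std::array, operator-> its members.
  std::cout << (*kernel)[0] << " " << (*kernel)[1] << "\n";
  std::cout << stride->at(1) << "\n";
  return 0;
}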
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h
ADDED
@@ -0,0 +1,389 @@
#pragma once

#include <ATen/ATen.h>

namespace torch {
namespace fft {

/// Computes the 1 dimensional fast Fourier transform over a given dimension.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.fft.
///
/// Example:
/// ```
/// auto t = torch::randn(128, dtype=kComplexDouble);
/// torch::fft::fft(t);
/// ```
inline Tensor fft(
    const Tensor& self,
    c10::optional<SymInt> n = c10::nullopt,
    int64_t dim = -1,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_fft_symint(self, n, dim, norm);
}

/// Computes the 1 dimensional inverse Fourier transform over a given dimension.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifft.
///
/// Example:
/// ```
/// auto t = torch::randn(128, dtype=kComplexDouble);
/// torch::fft::ifft(t);
/// ```
inline Tensor ifft(
    const Tensor& self,
    c10::optional<SymInt> n = c10::nullopt,
    int64_t dim = -1,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_ifft_symint(self, n, dim, norm);
}

/// Computes the 2-dimensional fast Fourier transform over the given dimensions.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.fft2.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, dtype=kComplexDouble);
/// torch::fft::fft2(t);
/// ```
inline Tensor fft2(
    const Tensor& self,
    OptionalIntArrayRef s = c10::nullopt,
    IntArrayRef dim = {-2, -1},
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_fft2(self, s, dim, norm);
}

/// Computes the inverse of torch.fft.fft2
/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifft2.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, dtype=kComplexDouble);
/// torch::fft::ifft2(t);
/// ```
inline Tensor ifft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    IntArrayRef dim = {-2, -1},
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_ifft2(self, s, dim, norm);
}

/// Computes the N dimensional fast Fourier transform over given dimensions.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.fftn.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, dtype=kComplexDouble);
/// torch::fft::fftn(t);
/// ```
inline Tensor fftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    at::OptionalIntArrayRef dim = c10::nullopt,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_fftn(self, s, dim, norm);
}

/// Computes the N dimensional fast Fourier transform over given dimensions.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifftn.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, dtype=kComplexDouble);
/// torch::fft::ifftn(t);
/// ```
inline Tensor ifftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    at::OptionalIntArrayRef dim = c10::nullopt,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_ifftn(self, s, dim, norm);
}

/// Computes the 1 dimensional FFT of real input with onesided Hermitian output.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.rfft.
///
/// Example:
/// ```
/// auto t = torch::randn(128);
/// auto T = torch::fft::rfft(t);
/// assert(T.is_complex() && T.numel() == 128 / 2 + 1);
/// ```
inline Tensor rfft(
    const Tensor& self,
    c10::optional<SymInt> n = c10::nullopt,
    int64_t dim = -1,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_rfft_symint(self, n, dim, norm);
}

/// Computes the inverse of torch.fft.rfft
///
/// The input is a onesided Hermitian Fourier domain signal, with real-valued
/// output. See https://pytorch.org/docs/master/fft.html#torch.fft.irfft
///
/// Example:
/// ```
/// auto T = torch::randn(128 / 2 + 1, torch::kComplexDouble);
/// auto t = torch::fft::irfft(t, /*n=*/128);
/// assert(t.is_floating_point() && T.numel() == 128);
/// ```
inline Tensor irfft(
    const Tensor& self,
    c10::optional<SymInt> n = c10::nullopt,
    int64_t dim = -1,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_irfft_symint(self, n, dim, norm);
}

/// Computes the 2-dimensional FFT of real input. Returns a onesided Hermitian
/// output. See https://pytorch.org/docs/master/fft.html#torch.fft.rfft2
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, dtype=kDouble);
/// torch::fft::rfft2(t);
/// ```
inline Tensor rfft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    IntArrayRef dim = {-2, -1},
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_rfft2(self, s, dim, norm);
}

/// Computes the inverse of torch.fft.rfft2.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.irfft2.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, dtype=kComplexDouble);
/// torch::fft::irfft2(t);
/// ```
inline Tensor irfft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    IntArrayRef dim = {-2, -1},
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_irfft2(self, s, dim, norm);
}

/// Computes the N dimensional FFT of real input with onesided Hermitian output.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.rfftn
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, dtype=kDouble);
/// torch::fft::rfftn(t);
/// ```
inline Tensor rfftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    at::OptionalIntArrayRef dim = c10::nullopt,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_rfftn(self, s, dim, norm);
}

/// Computes the inverse of torch.fft.rfftn.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.irfftn.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, dtype=kComplexDouble);
/// torch::fft::irfftn(t);
/// ```
inline Tensor irfftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    at::OptionalIntArrayRef dim = c10::nullopt,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_irfftn(self, s, dim, norm);
}

/// Computes the 1 dimensional FFT of a onesided Hermitian signal
///
/// The input represents a Hermitian symmetric time domain signal. The returned
/// Fourier domain representation of such a signal is a real-valued. See
/// https://pytorch.org/docs/master/fft.html#torch.fft.hfft
///
/// Example:
/// ```
/// auto t = torch::randn(128 / 2 + 1, torch::kComplexDouble);
/// auto T = torch::fft::hfft(t, /*n=*/128);
/// assert(T.is_floating_point() && T.numel() == 128);
/// ```
inline Tensor hfft(
    const Tensor& self,
    c10::optional<SymInt> n = c10::nullopt,
    int64_t dim = -1,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_hfft_symint(self, n, dim, norm);
}

/// Computes the inverse FFT of a real-valued Fourier domain signal.
///
/// The output is a onesided representation of the Hermitian symmetric time
/// domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.ihfft.
///
/// Example:
/// ```
/// auto T = torch::randn(128, torch::kDouble);
/// auto t = torch::fft::ihfft(T);
/// assert(t.is_complex() && T.numel() == 128 / 2 + 1);
/// ```
inline Tensor ihfft(
    const Tensor& self,
    c10::optional<SymInt> n = c10::nullopt,
    int64_t dim = -1,
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_ihfft_symint(self, n, dim, norm);
}

/// Computes the 2-dimensional FFT of a Hermitian symmetric input signal.
///
/// The input is a onesided representation of the Hermitian symmetric time
/// domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.hfft2.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 65}, torch::kComplexDouble);
/// auto T = torch::fft::hfft2(t, /*s=*/{128, 128});
/// assert(T.is_floating_point() && T.numel() == 128 * 128);
/// ```
inline Tensor hfft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    IntArrayRef dim = {-2, -1},
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_hfft2(self, s, dim, norm);
}

/// Computes the 2-dimensional IFFT of a real input signal.
///
/// The output is a onesided representation of the Hermitian symmetric time
/// domain signal. See
/// https://pytorch.org/docs/master/fft.html#torch.fft.ihfft2.
///
/// Example:
/// ```
/// auto T = torch::randn({128, 128}, torch::kDouble);
/// auto t = torch::fft::hfft2(T);
/// assert(t.is_complex() && t.size(1) == 65);
/// ```
inline Tensor ihfft2(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    IntArrayRef dim = {-2, -1},
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_ihfft2(self, s, dim, norm);
}

/// Computes the N-dimensional FFT of a Hermitian symmetric input signal.
///
/// The input is a onesided representation of the Hermitian symmetric time
/// domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.hfftn.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 65}, torch::kComplexDouble);
/// auto T = torch::fft::hfftn(t, /*s=*/{128, 128});
/// assert(T.is_floating_point() && T.numel() == 128 * 128);
/// ```
inline Tensor hfftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    IntArrayRef dim = {-2, -1},
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_hfftn(self, s, dim, norm);
}

/// Computes the N-dimensional IFFT of a real input signal.
///
/// The output is a onesided representation of the Hermitian symmetric time
/// domain signal. See
/// https://pytorch.org/docs/master/fft.html#torch.fft.ihfftn.
///
/// Example:
/// ```
/// auto T = torch::randn({128, 128}, torch::kDouble);
/// auto t = torch::fft::hfft2(T);
/// assert(t.is_complex() && t.size(1) == 65);
/// ```
inline Tensor ihfftn(
    const Tensor& self,
    at::OptionalIntArrayRef s = c10::nullopt,
    IntArrayRef dim = {-2, -1},
    c10::optional<c10::string_view> norm = c10::nullopt) {
  return torch::fft_ihfftn(self, s, dim, norm);
}

/// Computes the discrete Fourier Transform sample frequencies for a signal of
/// size n.
///
/// See https://pytorch.org/docs/master/fft.html#torch.fft.fftfreq
///
/// Example:
/// ```
/// auto frequencies = torch::fft::fftfreq(128, torch::kDouble);
/// ```
inline Tensor fftfreq(int64_t n, double d, const TensorOptions& options = {}) {
  return torch::fft_fftfreq(n, d, options);
}

inline Tensor fftfreq(int64_t n, const TensorOptions& options = {}) {
  return torch::fft_fftfreq(n, /*d=*/1.0, options);
}

/// Computes the sample frequencies for torch.fft.rfft with a signal of size n.
///
/// Like torch.fft.rfft, only the positive frequencies are included.
/// See https://pytorch.org/docs/master/fft.html#torch.fft.rfftfreq
///
/// Example:
/// ```
/// auto frequencies = torch::fft::rfftfreq(128, torch::kDouble);
/// ```
inline Tensor rfftfreq(int64_t n, double d, const TensorOptions& options) {
  return torch::fft_rfftfreq(n, d, options);
}

inline Tensor rfftfreq(int64_t n, const TensorOptions& options) {
  return torch::fft_rfftfreq(n, /*d=*/1.0, options);
}

/// Reorders n-dimensional FFT output to have negative frequency terms first, by
/// a torch.roll operation.
///
/// See https://pytorch.org/docs/master/fft.html#torch.fft.fftshift
///
/// Example:
/// ```
/// auto x = torch::randn({127, 4});
/// auto centred_fft = torch::fft::fftshift(torch::fft::fftn(x));
/// ```
inline Tensor fftshift(
    const Tensor& x,
    at::OptionalIntArrayRef dim = c10::nullopt) {
  return torch::fft_fftshift(x, dim);
}

/// Inverse of torch.fft.fftshift
///
/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifftshift
///
/// Example:
/// ```
/// auto x = torch::randn({127, 4});
/// auto shift = torch::fft::fftshift(x)
/// auto unshift = torch::fft::ifftshift(shift);
/// assert(torch::allclose(x, unshift));
/// ```
inline Tensor ifftshift(
    const Tensor& x,
    at::OptionalIntArrayRef dim = c10::nullopt) {
  return torch::fft_ifftshift(x, dim);
}

} // namespace fft
} // namespace torch
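The wrappers above forward to the corresponding torch::fft_* ops. A short round-trip sketch using the declared defaults, assuming the usual <torch/torch.h> umbrella include:

#include <torch/torch.h>

// Real signal -> onesided spectrum -> back to the real signal.
void rfft_roundtrip() {
  auto t = torch::randn(128, torch::kDouble);
  auto spectrum = torch::fft::rfft(t);              // 128 / 2 + 1 complex bins
  auto recovered = torch::fft::irfft(spectrum, 128);
  TORCH_CHECK(torch::allclose(t, recovered));
}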
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/imethod.h
ADDED
@@ -0,0 +1,53 @@
#pragma once
#include <ATen/core/ivalue.h>
#include <vector>

namespace torch {

class TORCH_API IMethod {
  /*
  IMethod provides a portable interface for torch methods, whether
  they are backed by torchscript or python/deploy.

  This is helpful since torchscript methods provide additional information
  (e.g. FunctionSchema, Graph) which aren't available in pure python methods.

  Higher level APIs should prefer depending on this interface rather
  than a specific implementation of it, to promote portability and reuse, and
  avoid unintentional dependencies on e.g. script methods.

  Note: This API is experimental, and may evolve.
  */
 public:
  using IValueList = std::vector<c10::IValue>;
  using IValueMap = std::unordered_map<std::string, at::IValue>;

  IMethod() = default;
  IMethod(const IMethod&) = default;
  IMethod& operator=(const IMethod&) = default;
  IMethod(IMethod&&) noexcept = default;
  IMethod& operator=(IMethod&&) noexcept = default;
  virtual ~IMethod() = default;

  virtual c10::IValue operator()(
      std::vector<c10::IValue> args,
      const IValueMap& kwargs = IValueMap()) const = 0;

  virtual const std::string& name() const = 0;

  // Returns an ordered list of argument names, possible in both
  // script and python methods. This is a more portable dependency
  // than a ScriptMethod FunctionSchema, which has more information
  // than can be generally expected from a python method.
  const std::vector<std::string>& getArgumentNames() const;

 protected:
  virtual void setArgumentNames(
      std::vector<std::string>& argumentNames) const = 0;

 private:
  mutable bool isArgumentNamesInitialized_{false};
  mutable std::vector<std::string> argumentNames_;
};

} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/jit.h
ADDED
@@ -0,0 +1,36 @@
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/api/module.h>

#include <memory>
#include <string>

namespace torch {
namespace jit {

/// Compiles script code into an executable graph.
///
/// Takes a string containing functions in script syntax and compiles them into
/// a module (graph). The returned module provides a `run_method` function
/// that may be used to invoke the compiled functions.
///
/// For example:
/// \rst
/// .. code-block:: cpp
///
///   auto module = torch::jit::compile(R"JIT(
///     def relu_script(a, b):
///       return torch.relu(a + b)
///     def test_while(a, i):
///       while i < 10:
///         a += a
///         i += 1
///       return a
///   )JIT");
///   IValue output = module->run_method("relu_script", a, b);
/// \endrst
TORCH_API std::shared_ptr<CompilationUnit> compile(const std::string& source);

} // namespace jit
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h
ADDED
@@ -0,0 +1,1065 @@
#pragma once

#include <ATen/ATen.h>

namespace torch {
namespace linalg {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {

inline Tensor cholesky(const Tensor& self) {
  return torch::linalg_cholesky(self);
}

inline Tensor cholesky_out(Tensor& result, const Tensor& self) {
  return torch::linalg_cholesky_out(result, self);
}

inline Tensor det(const Tensor& self) {
  return torch::linalg_det(self);
}

inline std::tuple<Tensor, Tensor> slogdet(const Tensor& input) {
  return torch::linalg_slogdet(input);
}

inline std::tuple<Tensor&, Tensor&> slogdet_out(
    Tensor& sign,
    Tensor& logabsdet,
    const Tensor& input) {
  return torch::linalg_slogdet_out(sign, logabsdet, input);
}

inline std::tuple<Tensor, Tensor> eig(const Tensor& self) {
  return torch::linalg_eig(self);
}

inline std::tuple<Tensor&, Tensor&> eig_out(
    Tensor& eigvals,
    Tensor& eigvecs,
    const Tensor& self) {
  return torch::linalg_eig_out(eigvals, eigvecs, self);
}

inline Tensor eigvals(const Tensor& self) {
  return torch::linalg_eigvals(self);
}

inline Tensor& eigvals_out(Tensor& result, const Tensor& self) {
  return torch::linalg_eigvals_out(result, self);
}

inline std::tuple<Tensor, Tensor> eigh(
    const Tensor& self,
    c10::string_view uplo) {
  return torch::linalg_eigh(self, uplo);
}

inline std::tuple<Tensor&, Tensor&> eigh_out(
    Tensor& eigvals,
    Tensor& eigvecs,
    const Tensor& self,
    c10::string_view uplo) {
  return torch::linalg_eigh_out(eigvals, eigvecs, self, uplo);
}

inline Tensor eigvalsh(const Tensor& self, c10::string_view uplo) {
  return torch::linalg_eigvalsh(self, uplo);
}

inline Tensor& eigvalsh_out(
    Tensor& result,
    const Tensor& self,
    c10::string_view uplo) {
  return torch::linalg_eigvalsh_out(result, self, uplo);
}

inline Tensor householder_product(const Tensor& input, const Tensor& tau) {
  return torch::linalg_householder_product(input, tau);
}

inline Tensor& householder_product_out(
    Tensor& result,
    const Tensor& input,
    const Tensor& tau) {
  return torch::linalg_householder_product_out(result, input, tau);
}

inline std::tuple<Tensor, Tensor> lu_factor(
    const Tensor& self,
    const bool pivot) {
  return torch::linalg_lu_factor(self, pivot);
}

inline std::tuple<Tensor&, Tensor&> lu_factor_out(
    Tensor& LU,
    Tensor& pivots,
    const Tensor& self,
    const bool pivot) {
  return torch::linalg_lu_factor_out(LU, pivots, self, pivot);
}

inline std::tuple<Tensor, Tensor, Tensor> lu(
    const Tensor& self,
    const bool pivot) {
  return torch::linalg_lu(self, pivot);
}

inline std::tuple<Tensor&, Tensor&, Tensor&> lu_out(
    Tensor& P,
    Tensor& L,
    Tensor& U,
    const Tensor& self,
    const bool pivot) {
  return torch::linalg_lu_out(P, L, U, self, pivot);
}

inline std::tuple<Tensor, Tensor, Tensor, Tensor> lstsq(
    const Tensor& self,
    const Tensor& b,
    c10::optional<double> cond,
    c10::optional<c10::string_view> driver) {
  return torch::linalg_lstsq(self, b, cond, driver);
}

inline Tensor matrix_exp(const Tensor& self) {
  return torch::linalg_matrix_exp(self);
}

inline Tensor norm(
    const Tensor& self,
    const optional<Scalar>& opt_ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return torch::linalg_norm(self, opt_ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor norm(
    const Tensor& self,
    c10::string_view ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return torch::linalg_norm(self, ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor& norm_out(
    Tensor& result,
    const Tensor& self,
    const optional<Scalar>& opt_ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return torch::linalg_norm_out(
      result, self, opt_ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor& norm_out(
    Tensor& result,
    const Tensor& self,
    c10::string_view ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return torch::linalg_norm_out(result, self, ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor vector_norm(
    const Tensor& self,
    Scalar ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return torch::linalg_vector_norm(self, ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor& vector_norm_out(
    Tensor& result,
    const Tensor& self,
    Scalar ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return torch::linalg_vector_norm_out(
      result, self, ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor matrix_norm(
    const Tensor& self,
    const Scalar& ord,
    IntArrayRef dim,
    bool keepdim,
    optional<ScalarType> dtype) {
  return torch::linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}

inline Tensor& matrix_norm_out(
    const Tensor& self,
    const Scalar& ord,
    IntArrayRef dim,
    bool keepdim,
    optional<ScalarType> dtype,
    Tensor& result) {
  return torch::linalg_matrix_norm_out(result, self, ord, dim, keepdim, dtype);
}

inline Tensor matrix_norm(
    const Tensor& self,
    std::string ord,
    IntArrayRef dim,
    bool keepdim,
    optional<ScalarType> dtype) {
  return torch::linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}

inline Tensor& matrix_norm_out(
    const Tensor& self,
    std::string ord,
    IntArrayRef dim,
    bool keepdim,
    optional<ScalarType> dtype,
    Tensor& result) {
  return torch::linalg_matrix_norm_out(result, self, ord, dim, keepdim, dtype);
}

inline Tensor matrix_power(const Tensor& self, int64_t n) {
|
228 |
+
return torch::linalg_matrix_power(self, n);
|
229 |
+
}
|
230 |
+
|
231 |
+
inline Tensor& matrix_power_out(const Tensor& self, int64_t n, Tensor& result) {
|
232 |
+
return torch::linalg_matrix_power_out(result, self, n);
|
233 |
+
}
|
234 |
+
|
235 |
+
inline Tensor matrix_rank(const Tensor& input, double tol, bool hermitian) {
|
236 |
+
return torch::linalg_matrix_rank(input, tol, hermitian);
|
237 |
+
}
|
238 |
+
|
239 |
+
inline Tensor matrix_rank(
|
240 |
+
const Tensor& input,
|
241 |
+
const Tensor& tol,
|
242 |
+
bool hermitian) {
|
243 |
+
return torch::linalg_matrix_rank(input, tol, hermitian);
|
244 |
+
}
|
245 |
+
|
246 |
+
inline Tensor matrix_rank(
|
247 |
+
const Tensor& input,
|
248 |
+
c10::optional<double> atol,
|
249 |
+
c10::optional<double> rtol,
|
250 |
+
bool hermitian) {
|
251 |
+
return torch::linalg_matrix_rank(input, atol, rtol, hermitian);
|
252 |
+
}
|
253 |
+
|
254 |
+
inline Tensor matrix_rank(
|
255 |
+
const Tensor& input,
|
256 |
+
const c10::optional<Tensor>& atol,
|
257 |
+
const c10::optional<Tensor>& rtol,
|
258 |
+
bool hermitian) {
|
259 |
+
return torch::linalg_matrix_rank(input, atol, rtol, hermitian);
|
260 |
+
}
|
261 |
+
|
262 |
+
inline Tensor& matrix_rank_out(
|
263 |
+
Tensor& result,
|
264 |
+
const Tensor& input,
|
265 |
+
double tol,
|
266 |
+
bool hermitian) {
|
267 |
+
return torch::linalg_matrix_rank_out(result, input, tol, hermitian);
|
268 |
+
}
|
269 |
+
|
270 |
+
inline Tensor& matrix_rank_out(
|
271 |
+
Tensor& result,
|
272 |
+
const Tensor& input,
|
273 |
+
const Tensor& tol,
|
274 |
+
bool hermitian) {
|
275 |
+
return torch::linalg_matrix_rank_out(result, input, tol, hermitian);
|
276 |
+
}
|
277 |
+
|
278 |
+
inline Tensor& matrix_rank_out(
|
279 |
+
Tensor& result,
|
280 |
+
const Tensor& input,
|
281 |
+
c10::optional<double> atol,
|
282 |
+
c10::optional<double> rtol,
|
283 |
+
bool hermitian) {
|
284 |
+
return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian);
|
285 |
+
}
|
286 |
+
|
287 |
+
inline Tensor& matrix_rank_out(
|
288 |
+
Tensor& result,
|
289 |
+
const Tensor& input,
|
290 |
+
const c10::optional<Tensor>& atol,
|
291 |
+
const c10::optional<Tensor>& rtol,
|
292 |
+
bool hermitian) {
|
293 |
+
return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian);
|
294 |
+
}
|
295 |
+
|
296 |
+
inline Tensor multi_dot(TensorList tensors) {
|
297 |
+
return torch::linalg_multi_dot(tensors);
|
298 |
+
}
|
299 |
+
|
300 |
+
inline Tensor& multi_dot_out(TensorList tensors, Tensor& result) {
|
301 |
+
return torch::linalg_multi_dot_out(result, tensors);
|
302 |
+
}
|
303 |
+
|
304 |
+
inline Tensor pinv(const Tensor& input, double rcond, bool hermitian) {
|
305 |
+
return torch::linalg_pinv(input, rcond, hermitian);
|
306 |
+
}
|
307 |
+
|
308 |
+
inline Tensor& pinv_out(
|
309 |
+
Tensor& result,
|
310 |
+
const Tensor& input,
|
311 |
+
double rcond,
|
312 |
+
bool hermitian) {
|
313 |
+
return torch::linalg_pinv_out(result, input, rcond, hermitian);
|
314 |
+
}
|
315 |
+
|
316 |
+
inline std::tuple<Tensor, Tensor> qr(
|
317 |
+
const Tensor& input,
|
318 |
+
c10::string_view mode) {
|
319 |
+
return torch::linalg_qr(input, mode);
|
320 |
+
}
|
321 |
+
|
322 |
+
inline std::tuple<Tensor&, Tensor&> qr_out(
|
323 |
+
Tensor& Q,
|
324 |
+
Tensor& R,
|
325 |
+
const Tensor& input,
|
326 |
+
c10::string_view mode) {
|
327 |
+
return torch::linalg_qr_out(Q, R, input, mode);
|
328 |
+
}
|
329 |
+
|
330 |
+
inline std::tuple<Tensor, Tensor> solve_ex(
|
331 |
+
const Tensor& input,
|
332 |
+
const Tensor& other,
|
333 |
+
bool left,
|
334 |
+
bool check_errors) {
|
335 |
+
return torch::linalg_solve_ex(input, other, left, check_errors);
|
336 |
+
}
|
337 |
+
|
338 |
+
inline std::tuple<Tensor&, Tensor&> solve_ex_out(
|
339 |
+
Tensor& result,
|
340 |
+
Tensor& info,
|
341 |
+
const Tensor& input,
|
342 |
+
const Tensor& other,
|
343 |
+
bool left,
|
344 |
+
bool check_errors) {
|
345 |
+
return torch::linalg_solve_ex_out(
|
346 |
+
result, info, input, other, left, check_errors);
|
347 |
+
}
|
348 |
+
|
349 |
+
inline Tensor solve(const Tensor& input, const Tensor& other, bool left) {
|
350 |
+
return torch::linalg_solve(input, other, left);
|
351 |
+
}
|
352 |
+
|
353 |
+
inline Tensor& solve_out(
|
354 |
+
Tensor& result,
|
355 |
+
const Tensor& input,
|
356 |
+
const Tensor& other,
|
357 |
+
bool left) {
|
358 |
+
return torch::linalg_solve_out(result, input, other, left);
|
359 |
+
}
|
360 |
+
|
361 |
+
inline Tensor solve_triangular(
|
362 |
+
const Tensor& input,
|
363 |
+
const Tensor& other,
|
364 |
+
bool upper,
|
365 |
+
bool left,
|
366 |
+
bool unitriangular) {
|
367 |
+
return torch::linalg_solve_triangular(
|
368 |
+
input, other, upper, left, unitriangular);
|
369 |
+
}
|
370 |
+
|
371 |
+
inline Tensor& solve_triangular_out(
|
372 |
+
Tensor& result,
|
373 |
+
const Tensor& input,
|
374 |
+
const Tensor& other,
|
375 |
+
bool upper,
|
376 |
+
bool left,
|
377 |
+
bool unitriangular) {
|
378 |
+
return torch::linalg_solve_triangular_out(
|
379 |
+
result, input, other, upper, left, unitriangular);
|
380 |
+
}
|
381 |
+
|
382 |
+
inline std::tuple<Tensor, Tensor, Tensor> svd(
|
383 |
+
const Tensor& input,
|
384 |
+
bool full_matrices,
|
385 |
+
c10::optional<c10::string_view> driver) {
|
386 |
+
return torch::linalg_svd(input, full_matrices, driver);
|
387 |
+
}
|
388 |
+
|
389 |
+
inline std::tuple<Tensor&, Tensor&, Tensor&> svd_out(
|
390 |
+
Tensor& U,
|
391 |
+
Tensor& S,
|
392 |
+
Tensor& Vh,
|
393 |
+
const Tensor& input,
|
394 |
+
bool full_matrices,
|
395 |
+
c10::optional<c10::string_view> driver) {
|
396 |
+
return torch::linalg_svd_out(U, S, Vh, input, full_matrices, driver);
|
397 |
+
}
|
398 |
+
|
399 |
+
inline Tensor svdvals(
|
400 |
+
const Tensor& input,
|
401 |
+
c10::optional<c10::string_view> driver) {
|
402 |
+
return torch::linalg_svdvals(input, driver);
|
403 |
+
}
|
404 |
+
|
405 |
+
inline Tensor& svdvals_out(
|
406 |
+
Tensor& result,
|
407 |
+
const Tensor& input,
|
408 |
+
c10::optional<c10::string_view> driver) {
|
409 |
+
return torch::linalg_svdvals_out(result, input, driver);
|
410 |
+
}
|
411 |
+
|
412 |
+
inline Tensor tensorinv(const Tensor& self, int64_t ind) {
|
413 |
+
return torch::linalg_tensorinv(self, ind);
|
414 |
+
}
|
415 |
+
|
416 |
+
inline Tensor& tensorinv_out(Tensor& result, const Tensor& self, int64_t ind) {
|
417 |
+
return torch::linalg_tensorinv_out(result, self, ind);
|
418 |
+
}
|
419 |
+
|
420 |
+
inline Tensor tensorsolve(
|
421 |
+
const Tensor& self,
|
422 |
+
const Tensor& other,
|
423 |
+
OptionalIntArrayRef dims) {
|
424 |
+
return torch::linalg_tensorsolve(self, other, dims);
|
425 |
+
}
|
426 |
+
|
427 |
+
inline Tensor& tensorsolve_out(
|
428 |
+
Tensor& result,
|
429 |
+
const Tensor& self,
|
430 |
+
const Tensor& other,
|
431 |
+
OptionalIntArrayRef dims) {
|
432 |
+
return torch::linalg_tensorsolve_out(result, self, other, dims);
|
433 |
+
}
|
434 |
+
|
435 |
+
inline Tensor inv(const Tensor& input) {
|
436 |
+
return torch::linalg_inv(input);
|
437 |
+
}
|
438 |
+
|
439 |
+
inline Tensor& inv_out(Tensor& result, const Tensor& input) {
|
440 |
+
return torch::linalg_inv_out(result, input);
|
441 |
+
}
|
442 |
+
|
443 |
+
} // namespace detail
|
444 |
+
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
|
445 |
+
|
/// Cholesky decomposition
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.cholesky
///
/// Example:
/// ```
/// auto A = torch::randn({4, 4});
/// A = torch::matmul(A, A.t());
/// auto L = torch::linalg::cholesky(A);
/// assert(torch::allclose(torch::matmul(L, L.t()), A));
/// ```
inline Tensor cholesky(const Tensor& self) {
  return detail::cholesky(self);
}

inline Tensor cholesky_out(Tensor& result, const Tensor& self) {
  return detail::cholesky_out(result, self);
}

// C10_DEPRECATED_MESSAGE("linalg_det is deprecated, use det instead.")
inline Tensor linalg_det(const Tensor& self) {
  return detail::det(self);
}

/// See the documentation of torch.linalg.det
inline Tensor det(const Tensor& self) {
  return detail::det(self);
}

/// Computes the sign and (natural) logarithm of the determinant
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.slogdet
inline std::tuple<Tensor, Tensor> slogdet(const Tensor& input) {
  return detail::slogdet(input);
}

inline std::tuple<Tensor&, Tensor&> slogdet_out(
    Tensor& sign,
    Tensor& logabsdet,
    const Tensor& input) {
  return detail::slogdet_out(sign, logabsdet, input);
}

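// Illustrative usage sketch (editorial note, not part of the original
// header): slogdet is the numerically stable way to obtain log|det(A)| for a
// square matrix A.
//
//   auto A = torch::randn({3, 3});
//   auto result = torch::linalg::slogdet(A);
//   auto sign = std::get<0>(result);       // +1, -1 (or a complex phase)
//   auto logabsdet = std::get<1>(result);  // log|det(A)|
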
/// Computes eigenvalues and eigenvectors of non-symmetric/non-hermitian
/// matrices
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eig
inline std::tuple<Tensor, Tensor> eig(const Tensor& self) {
  return detail::eig(self);
}

inline std::tuple<Tensor&, Tensor&> eig_out(
    Tensor& eigvals,
    Tensor& eigvecs,
    const Tensor& self) {
  return detail::eig_out(eigvals, eigvecs, self);
}

/// Computes eigenvalues of non-symmetric/non-hermitian matrices
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvals
inline Tensor eigvals(const Tensor& self) {
  return detail::eigvals(self);
}

inline Tensor& eigvals_out(Tensor& result, const Tensor& self) {
  return detail::eigvals_out(result, self);
}

/// Computes eigenvalues and eigenvectors
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigh
inline std::tuple<Tensor, Tensor> eigh(
    const Tensor& self,
    c10::string_view uplo) {
  return detail::eigh(self, uplo);
}

inline std::tuple<Tensor&, Tensor&> eigh_out(
    Tensor& eigvals,
    Tensor& eigvecs,
    const Tensor& self,
    c10::string_view uplo) {
  return detail::eigh_out(eigvals, eigvecs, self, uplo);
}

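// Illustrative usage sketch (editorial note, not part of the original
// header): eigh assumes a symmetric/Hermitian input; "L" reads the lower
// triangle.
//
//   auto A = torch::randn({4, 4});
//   A = A + A.t();  // symmetrize
//   auto [w, V] = torch::linalg::eigh(A, "L");
//   // A is (approximately) V.matmul(torch::diag(w)).matmul(V.t())
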
/// Computes eigenvalues
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvalsh
inline Tensor eigvalsh(const Tensor& self, c10::string_view uplo) {
  return detail::eigvalsh(self, uplo);
}

inline Tensor& eigvalsh_out(
    Tensor& result,
    const Tensor& self,
    c10::string_view uplo) {
  return detail::eigvalsh_out(result, self, uplo);
}

/// Computes the product of Householder matrices
///
/// See
/// https://pytorch.org/docs/master/linalg.html#torch.linalg.householder_product
inline Tensor householder_product(const Tensor& input, const Tensor& tau) {
  return detail::householder_product(input, tau);
}

inline Tensor& householder_product_out(
    Tensor& result,
    const Tensor& input,
    const Tensor& tau) {
  return detail::householder_product_out(result, input, tau);
}

inline std::tuple<Tensor, Tensor, Tensor, Tensor> lstsq(
    const Tensor& self,
    const Tensor& b,
    c10::optional<double> cond,
    c10::optional<c10::string_view> driver) {
  return detail::lstsq(self, b, cond, driver);
}

/// Computes the matrix exponential
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_exp
inline Tensor matrix_exp(const Tensor& input) {
  return detail::matrix_exp(input);
}

// C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.")
inline Tensor linalg_norm(
    const Tensor& self,
    const optional<Scalar>& opt_ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::norm(self, opt_ord, opt_dim, keepdim, opt_dtype);
}

// C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.")
inline Tensor linalg_norm(
    const Tensor& self,
    c10::string_view ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::norm(self, ord, opt_dim, keepdim, opt_dtype);
}

// C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out
// instead.")
inline Tensor& linalg_norm_out(
    Tensor& result,
    const Tensor& self,
    const optional<Scalar>& opt_ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::norm_out(result, self, opt_ord, opt_dim, keepdim, opt_dtype);
}

// C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out
// instead.")
inline Tensor& linalg_norm_out(
    Tensor& result,
    const Tensor& self,
    c10::string_view ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::norm_out(result, self, ord, opt_dim, keepdim, opt_dtype);
}

/// Computes the LU factorization with partial pivoting
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu_factor
inline std::tuple<Tensor, Tensor> lu_factor(
    const Tensor& input,
    const bool pivot = true) {
  return detail::lu_factor(input, pivot);
}

inline std::tuple<Tensor&, Tensor&> lu_factor_out(
    Tensor& LU,
    Tensor& pivots,
    const Tensor& self,
    const bool pivot = true) {
  return detail::lu_factor_out(LU, pivots, self, pivot);
}

/// Computes the LU factorization with partial pivoting
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu
inline std::tuple<Tensor, Tensor, Tensor> lu(
    const Tensor& input,
    const bool pivot = true) {
  return detail::lu(input, pivot);
}

inline std::tuple<Tensor&, Tensor&, Tensor&> lu_out(
    Tensor& P,
    Tensor& L,
    Tensor& U,
    const Tensor& self,
    const bool pivot = true) {
  return detail::lu_out(P, L, U, self, pivot);
}

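// Illustrative usage sketch (editorial note, not part of the original
// header): lu_factor returns the packed LU matrix plus pivot indices, which
// is the preferred form for repeated solves against the same matrix.
//
//   auto A = torch::randn({4, 4});
//   auto [LU, pivots] = torch::linalg::lu_factor(A);
//   // The (LU, pivots) pair can be reused for several right-hand sides,
//   // e.g. via torch::linalg_lu_solve, assuming this build exposes it.
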
inline Tensor norm(
    const Tensor& self,
    const optional<Scalar>& opt_ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::norm(self, opt_ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor norm(
    const Tensor& self,
    std::string ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::norm(self, ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor& norm_out(
    Tensor& result,
    const Tensor& self,
    const optional<Scalar>& opt_ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::norm_out(result, self, opt_ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor& norm_out(
    Tensor& result,
    const Tensor& self,
    std::string ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::norm_out(result, self, ord, opt_dim, keepdim, opt_dtype);
}

/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.vector_norm
inline Tensor vector_norm(
    const Tensor& self,
    Scalar ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::vector_norm(self, ord, opt_dim, keepdim, opt_dtype);
}

inline Tensor& vector_norm_out(
    Tensor& result,
    const Tensor& self,
    Scalar ord,
    OptionalIntArrayRef opt_dim,
    bool keepdim,
    optional<ScalarType> opt_dtype) {
  return detail::vector_norm_out(
      result, self, ord, opt_dim, keepdim, opt_dtype);
}

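// Illustrative usage sketch (editorial note, not part of the original
// header): vector_norm flattens over the given dims; ord=2 is the Euclidean
// norm.
//
//   auto x = torch::arange(6, torch::kFloat).reshape({2, 3});
//   auto n = torch::linalg::vector_norm(
//       x, /*ord=*/2, /*opt_dim=*/c10::nullopt, /*keepdim=*/false,
//       /*opt_dtype=*/c10::nullopt);
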
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_norm
inline Tensor matrix_norm(
    const Tensor& self,
    const Scalar& ord,
    IntArrayRef dim,
    bool keepdim,
    optional<ScalarType> dtype) {
  return detail::matrix_norm(self, ord, dim, keepdim, dtype);
}

inline Tensor& matrix_norm_out(
    const Tensor& self,
    const Scalar& ord,
    IntArrayRef dim,
    bool keepdim,
    optional<ScalarType> dtype,
    Tensor& result) {
  return detail::matrix_norm_out(self, ord, dim, keepdim, dtype, result);
}

inline Tensor matrix_norm(
    const Tensor& self,
    std::string ord,
    IntArrayRef dim,
    bool keepdim,
    optional<ScalarType> dtype) {
  return detail::matrix_norm(self, ord, dim, keepdim, dtype);
}

inline Tensor& matrix_norm_out(
    const Tensor& self,
    std::string ord,
    IntArrayRef dim,
    bool keepdim,
    optional<ScalarType> dtype,
    Tensor& result) {
  return detail::matrix_norm_out(self, ord, dim, keepdim, dtype, result);
}

/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_power
inline Tensor matrix_power(const Tensor& self, int64_t n) {
  return detail::matrix_power(self, n);
}

inline Tensor& matrix_power_out(const Tensor& self, int64_t n, Tensor& result) {
  return detail::matrix_power_out(self, n, result);
}

/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_rank
inline Tensor matrix_rank(const Tensor& input, double tol, bool hermitian) {
  return detail::matrix_rank(input, tol, hermitian);
}

inline Tensor matrix_rank(
    const Tensor& input,
    const Tensor& tol,
    bool hermitian) {
  return detail::matrix_rank(input, tol, hermitian);
}

inline Tensor matrix_rank(
    const Tensor& input,
    c10::optional<double> atol,
    c10::optional<double> rtol,
    bool hermitian) {
  return detail::matrix_rank(input, atol, rtol, hermitian);
}

inline Tensor matrix_rank(
    const Tensor& input,
    const c10::optional<Tensor>& atol,
    const c10::optional<Tensor>& rtol,
    bool hermitian) {
  return detail::matrix_rank(input, atol, rtol, hermitian);
}

inline Tensor& matrix_rank_out(
    Tensor& result,
    const Tensor& input,
    double tol,
    bool hermitian) {
  return detail::matrix_rank_out(result, input, tol, hermitian);
}

inline Tensor& matrix_rank_out(
    Tensor& result,
    const Tensor& input,
    const Tensor& tol,
    bool hermitian) {
  return detail::matrix_rank_out(result, input, tol, hermitian);
}

inline Tensor& matrix_rank_out(
    Tensor& result,
    const Tensor& input,
    c10::optional<double> atol,
    c10::optional<double> rtol,
    bool hermitian) {
  return detail::matrix_rank_out(result, input, atol, rtol, hermitian);
}

inline Tensor& matrix_rank_out(
    Tensor& result,
    const Tensor& input,
    const c10::optional<Tensor>& atol,
    const c10::optional<Tensor>& rtol,
    bool hermitian) {
  return detail::matrix_rank_out(result, input, atol, rtol, hermitian);
}

/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.multi_dot
inline Tensor multi_dot(TensorList tensors) {
  return detail::multi_dot(tensors);
}

inline Tensor& multi_dot_out(TensorList tensors, Tensor& result) {
  return detail::multi_dot_out(tensors, result);
}

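// Illustrative usage sketch (editorial note, not part of the original
// header): multi_dot picks an association order that minimizes the number of
// scalar multiplications when chaining several matmuls.
//
//   auto A = torch::randn({10, 100});
//   auto B = torch::randn({100, 5});
//   auto C = torch::randn({5, 50});
//   auto out = torch::linalg::multi_dot({A, B, C});  // same result as A.mm(B).mm(C)
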
/// Computes the pseudo-inverse
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.pinv
inline Tensor pinv(
    const Tensor& input,
    double rcond = 1e-15,
    bool hermitian = false) {
  return detail::pinv(input, rcond, hermitian);
}

inline Tensor& pinv_out(
    Tensor& result,
    const Tensor& input,
    double rcond = 1e-15,
    bool hermitian = false) {
  return detail::pinv_out(result, input, rcond, hermitian);
}

/// Computes the QR decomposition
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.qr
inline std::tuple<Tensor, Tensor> qr(
    const Tensor& input,
    c10::string_view mode = "reduced") {
  // C++17 Change the initialisation to "reduced"sv
  // Same for qr_out
  return detail::qr(input, mode);
}

inline std::tuple<Tensor&, Tensor&> qr_out(
    Tensor& Q,
    Tensor& R,
    const Tensor& input,
    c10::string_view mode = "reduced") {
  return detail::qr_out(Q, R, input, mode);
}

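// Illustrative usage sketch (editorial note, not part of the original
// header): qr with the default "reduced" mode returns Q (m x k) and R (k x n)
// with k = min(m, n); pinv uses its default rcond/hermitian arguments here.
//
//   auto A = torch::randn({5, 3});
//   auto [Q, R] = torch::linalg::qr(A);
//   auto A_pinv = torch::linalg::pinv(A);
//   // Q.matmul(R) reconstructs A; A_pinv.matmul(A) is close to the identity
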
/// Computes the LDL decomposition
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_factor_ex
inline std::tuple<Tensor, Tensor, Tensor> ldl_factor_ex(
    const Tensor& input,
    bool hermitian,
    bool check_errors) {
  return torch::linalg_ldl_factor_ex(input, hermitian, check_errors);
}

inline std::tuple<Tensor&, Tensor&, Tensor&> ldl_factor_ex_out(
    Tensor& LD,
    Tensor& pivots,
    Tensor& info,
    const Tensor& input,
    bool hermitian,
    bool check_errors) {
  return torch::linalg_ldl_factor_ex_out(
      LD, pivots, info, input, hermitian, check_errors);
}

/// Solve a system of linear equations using the LDL decomposition
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_solve
inline Tensor ldl_solve(
    const Tensor& LD,
    const Tensor& pivots,
    const Tensor& B,
    bool hermitian) {
  return torch::linalg_ldl_solve(LD, pivots, B, hermitian);
}

inline Tensor& ldl_solve_out(
    Tensor& result,
    const Tensor& LD,
    const Tensor& pivots,
    const Tensor& B,
    bool hermitian) {
  return torch::linalg_ldl_solve_out(result, LD, pivots, B, hermitian);
}

/// Solves a linear system AX = B
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_ex
inline std::tuple<Tensor, Tensor> solve_ex(
    const Tensor& input,
    const Tensor& other,
    bool left,
    bool check_errors) {
  return detail::solve_ex(input, other, left, check_errors);
}

inline std::tuple<Tensor&, Tensor&> solve_ex_out(
    Tensor& result,
    Tensor& info,
    const Tensor& input,
    const Tensor& other,
    bool left,
    bool check_errors) {
  return detail::solve_ex_out(result, info, input, other, left, check_errors);
}

/// Computes a tensor `x` such that `matmul(input, x) = other`.
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.solve
inline Tensor solve(const Tensor& input, const Tensor& other, bool left) {
  return detail::solve(input, other, left);
}

inline Tensor& solve_out(
    Tensor& result,
    const Tensor& input,
    const Tensor& other,
    bool left) {
  return detail::solve_out(result, input, other, left);
}

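// Illustrative usage sketch (editorial note, not part of the original
// header): solve computes X with A.matmul(X) == B when `left` is true and
// X.matmul(A) == B otherwise; solve_ex additionally reports the LAPACK info
// code instead of throwing.
//
//   auto A = torch::randn({3, 3});
//   auto B = torch::randn({3, 2});
//   auto X = torch::linalg::solve(A, B, /*left=*/true);
//   // torch::allclose(torch::matmul(A, X), B) should hold for well-conditioned A
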
/// Computes a solution of a linear system AX = B for input = A and other = B
/// whenever A is square upper or lower triangular and does not have zeros in
/// the diagonal
///
/// See
/// https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_triangular
inline Tensor solve_triangular(
    const Tensor& input,
    const Tensor& other,
    bool upper,
    bool left,
    bool unitriangular) {
  return detail::solve_triangular(input, other, upper, left, unitriangular);
}

inline Tensor& solve_triangular_out(
    Tensor& result,
    const Tensor& input,
    const Tensor& other,
    bool upper,
    bool left,
    bool unitriangular) {
  return detail::solve_triangular_out(
      result, input, other, upper, left, unitriangular);
}

/// Computes the singular values and singular vectors
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.svd
inline std::tuple<Tensor, Tensor, Tensor> svd(
    const Tensor& input,
    bool full_matrices,
    c10::optional<c10::string_view> driver) {
  return detail::svd(input, full_matrices, driver);
}

inline std::tuple<Tensor&, Tensor&, Tensor&> svd_out(
    Tensor& U,
    Tensor& S,
    Tensor& Vh,
    const Tensor& input,
    bool full_matrices,
    c10::optional<c10::string_view> driver) {
  return detail::svd_out(U, S, Vh, input, full_matrices, driver);
}

/// Computes the singular values
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.svdvals
inline Tensor svdvals(
    const Tensor& input,
    c10::optional<c10::string_view> driver) {
  return detail::svdvals(input, driver);
}

inline Tensor& svdvals_out(
    Tensor& result,
    const Tensor& input,
    c10::optional<c10::string_view> driver) {
  return detail::svdvals_out(result, input, driver);
}

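// Illustrative usage sketch (editorial note, not part of the original
// header): with full_matrices=false, svd returns U, S, Vh such that
// input ~= U.matmul(torch::diag(S)).matmul(Vh); svdvals returns only S.
//
//   auto A = torch::randn({5, 3});
//   auto [U, S, Vh] = torch::linalg::svd(A, /*full_matrices=*/false,
//                                        /*driver=*/c10::nullopt);
//   auto S_only = torch::linalg::svdvals(A, c10::nullopt);
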
/// Computes the inverse of a tensor
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorinv
///
/// Example:
/// ```
/// auto a = torch::eye(4*6).reshape({4, 6, 8, 3});
/// int64_t ind = 2;
/// auto ainv = torch::linalg::tensorinv(a, ind);
/// ```
inline Tensor tensorinv(const Tensor& self, int64_t ind) {
  return detail::tensorinv(self, ind);
}

inline Tensor& tensorinv_out(Tensor& result, const Tensor& self, int64_t ind) {
  return detail::tensorinv_out(result, self, ind);
}

/// Computes a tensor `x` such that `tensordot(input, x, dims=x.dim()) = other`.
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorsolve
///
/// Example:
/// ```
/// auto a = torch::eye(2*3*4).reshape({2*3, 4, 2, 3, 4});
/// auto b = torch::randn({2*3, 4});
/// auto x = torch::linalg::tensorsolve(a, b);
/// ```
inline Tensor tensorsolve(
    const Tensor& input,
    const Tensor& other,
    OptionalIntArrayRef dims) {
  return detail::tensorsolve(input, other, dims);
}

inline Tensor& tensorsolve_out(
    Tensor& result,
    const Tensor& input,
    const Tensor& other,
    OptionalIntArrayRef dims) {
  return detail::tensorsolve_out(result, input, other, dims);
}

/// Computes a tensor `inverse_input` such that `dot(input, inverse_input) =
/// eye(input.size(0))`.
///
/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.inv
inline Tensor inv(const Tensor& input) {
  return detail::inv(input);
}

inline Tensor& inv_out(Tensor& result, const Tensor& input) {
  return detail::inv_out(result, input);
}

} // namespace linalg
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h
ADDED
@@ -0,0 +1,44 @@
#pragma once

#include <torch/csrc/Export.h>

#include <cstddef>
#include <cstdint>

#ifdef __OBJC__
#include <Foundation/Foundation.h>
#include <Metal/Metal.h>
using MTLCommandBuffer_t = id<MTLCommandBuffer>;
using DispatchQueue_t = dispatch_queue_t;
#else
using MTLCommandBuffer_t = void*;
using DispatchQueue_t = void*;
#endif

namespace torch {
namespace mps {

/// Returns true if MPS device is available.
bool TORCH_API is_available();

/// Sets the RNG seed for the MPS device.
void TORCH_API manual_seed(uint64_t seed);

/// Waits for all streams on the MPS device to complete.
/// This blocks the calling CPU thread by using the 'waitUntilCompleted()'
/// method to wait for Metal command buffers to finish executing all the
/// encoded GPU operations before returning.
void TORCH_API synchronize();

/// Submits the currently active command buffer to run on the MPS device.
void TORCH_API commit();

/// Get the current command buffer to encode the Metal commands.
MTLCommandBuffer_t TORCH_API get_command_buffer();

/// Get the dispatch_queue_t to synchronize encoding the custom kernels
/// with the PyTorch MPS backend.
DispatchQueue_t TORCH_API get_dispatch_queue();

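// Illustrative usage sketch (editorial note, not part of the original
// header), assuming a build with MPS support and that torch::kMPS is
// available as the MPS device constant:
//
//   if (torch::mps::is_available()) {
//     torch::mps::manual_seed(0);
//     auto x = torch::randn({1024, 1024}, torch::device(torch::kMPS));
//     auto y = x.matmul(x);
//     torch::mps::synchronize();  // wait for the queued Metal work to finish
//   }
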
} // namespace mps
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nested.h
ADDED
@@ -0,0 +1,95 @@
#pragma once

#include <ATen/ATen.h>
#include <ATen/core/ATen_fwd.h>
#include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>
#include <algorithm>

namespace torch {
namespace nested {

/// Nested tensor
///
/// See
/// https://pytorch.org/docs/master/nested.html#torch.nested.nested_tensor
///
/// ```
// implemented on python object to allow torch.nested.nested_tensor to be
// constructed with arbitrarily nested python objects - for now, only arbitrary
// python lists and lists of Tensors
// See torch/csrc/autograd/python_nested_functions_manual.cpp for Python
// implementation
// See here for C++ implementation
inline at::Tensor nested_tensor(
    at::TensorList nested_tensor_data,
    const at::TensorOptions& options = {}) {
  auto out = at::_nested_tensor_from_tensor_list(
      nested_tensor_data,
      c10::typeMetaToScalarType(options.dtype()),
      c10::nullopt,
      options.device(),
      options.pinned_memory());
  if (options.has_requires_grad() && options.requires_grad()) {
    out.requires_grad_(true);
  }
  return out;
}

inline at::Tensor nested_tensor(
    at::ArrayRef<detail::TensorDataContainer> nested_tensor_data,
    const at::TensorOptions& options = {}) {
  for (const auto& tdc : nested_tensor_data) {
    TORCH_CHECK(
        tdc.is_init_list(),
        "nested_tensor() not implemented for these parameters");
  }
  // Construct a TensorList using nested_tensor_data
  std::vector<at::Tensor> tensor_list(nested_tensor_data.size());
  std::transform(
      nested_tensor_data.begin(),
      nested_tensor_data.end(),
      tensor_list.begin(),
      [&](const detail::TensorDataContainer& tdc) {
        return tdc.convert_to_tensor(options);
      });
  auto out = at::_nested_tensor_from_tensor_list(
      tensor_list,
      c10::typeMetaToScalarType(options.dtype()),
      c10::nullopt,
      options.device(),
      options.pinned_memory());
  if (options.has_requires_grad() && options.requires_grad()) {
    out.requires_grad_(true);
  }
  return out;
}

/// As Nested Tensor
///
/// See
/// https://pytorch.org/docs/master/nested.html#torch.nested.as_nested_tensor
///
/// ```
inline at::Tensor as_nested_tensor(
    at::TensorList list,
    c10::optional<at::ScalarType> dtype = c10::nullopt,
    c10::optional<at::Device> device = c10::nullopt) {
  return at::_nested_tensor_from_tensor_list(
      list, dtype, c10::nullopt, device, c10::nullopt);
}

/// Nested to padded tensor
///
/// See
/// https://pytorch.org/docs/master/nested.html#torch.nested.to_padded_tensor
///
/// ```
inline at::Tensor to_padded_tensor(
    const at::Tensor& self,
    double padding,
    at::OptionalIntArrayRef output_size = c10::nullopt) {
  return at::nested_to_padded_tensor(self, padding, output_size);
}

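// Illustrative usage sketch (editorial note, not part of the original
// header): build a nested tensor from variable-length rows and pad it to a
// dense batch.
//
//   auto nt = torch::nested::nested_tensor(
//       {torch::randn({2, 8}), torch::randn({5, 8})});
//   auto padded = torch::nested::to_padded_tensor(nt, /*padding=*/0.0);
//   // padded has shape {2, 5, 8}; the shorter row is filled with 0.0
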
} // namespace nested
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn.h
ADDED
@@ -0,0 +1,10 @@
#pragma once

#include <torch/nn/cloneable.h>
#include <torch/nn/functional.h>
#include <torch/nn/init.h>
#include <torch/nn/module.h>
#include <torch/nn/modules.h>
#include <torch/nn/options.h>
#include <torch/nn/pimpl.h>
#include <torch/nn/utils.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h
ADDED
@@ -0,0 +1,98 @@
#pragma once

#include <torch/nn/module.h>
#include <torch/types.h>
#include <torch/utils.h>

#include <c10/core/TensorOptions.h>
#include <c10/util/Exception.h>

#include <memory>
#include <utility>

namespace torch {
namespace nn {
/// The `clone()` method in the base `Module` class does not have knowledge of
/// the concrete runtime type of its subclasses. Therefore, `clone()` must
/// either be called from within the subclass, or from a base class that has
/// knowledge of the concrete type. `Cloneable` uses the CRTP to gain
/// knowledge of the subclass' static type and provide an implementation of the
/// `clone()` method. We do not want to use this pattern in the base class,
/// because then storing a module would always require templatizing it.
template <typename Derived>
// NOLINTNEXTLINE(bugprone-exception-escape)
class Cloneable : public Module {
 public:
  using Module::Module;

  /// `reset()` must perform initialization of all members with reference
  /// semantics, most importantly parameters, buffers and submodules.
  virtual void reset() = 0;

  /// Performs a recursive "deep copy" of the `Module`, such that all
  /// parameters and submodules in the cloned module are different from those
  /// in the original module.
  std::shared_ptr<Module> clone(
      const optional<Device>& device = nullopt) const override {
    NoGradGuard no_grad;

    const auto& self = static_cast<const Derived&>(*this);
    auto copy = std::make_shared<Derived>(self);
    copy->parameters_.clear();
    copy->buffers_.clear();
    copy->children_.clear();
    copy->reset();
    TORCH_CHECK(
        copy->parameters_.size() == parameters_.size(),
        "The cloned module does not have the same number of "
        "parameters as the original module after calling reset(). "
        "Are you sure you called register_parameter() inside reset() "
        "and not the constructor?");
    for (const auto& parameter : named_parameters(/*recurse=*/false)) {
      auto& tensor = *parameter;
      auto data = device && tensor.device() != *device
          ? tensor.to(*device)
          : autograd::Variable(tensor).clone();
      copy->parameters_[parameter.key()].set_data(data);
    }
    TORCH_CHECK(
        copy->buffers_.size() == buffers_.size(),
        "The cloned module does not have the same number of "
        "buffers as the original module after calling reset(). "
        "Are you sure you called register_buffer() inside reset() "
        "and not the constructor?");
    for (const auto& buffer : named_buffers(/*recurse=*/false)) {
      auto& tensor = *buffer;
      auto data = device && tensor.device() != *device
          ? tensor.to(*device)
          : autograd::Variable(tensor).clone();
      copy->buffers_[buffer.key()].set_data(data);
    }
    TORCH_CHECK(
        copy->children_.size() == children_.size(),
        "The cloned module does not have the same number of "
        "child modules as the original module after calling reset(). "
        "Are you sure you called register_module() inside reset() "
        "and not the constructor?");
    for (const auto& child : children_) {
      copy->children_[child.key()]->clone_(*child.value(), device);
    }
    return copy;
  }

 private:
  void clone_(Module& other, const optional<Device>& device) final {
    // Here we are *pretty* certain that `other's` type is `Derived` (because
    // it was registered under the same name as `this`), but you never know
    // what crazy things `reset()` does, so `dynamic_cast` just to be safe.
    auto clone = std::dynamic_pointer_cast<Derived>(other.clone(device));
    TORCH_CHECK(
        clone != nullptr,
        "Attempted to clone submodule, but it is of a "
        "different type than the submodule it was to be cloned into");
    static_cast<Derived&>(*this) = *clone;
  }
};

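// Illustrative usage sketch (editorial note, not part of the original
// header): a user-defined module (hypothetical `MyModuleImpl`) derives from
// Cloneable<Impl> and registers its state inside reset() so clone() can
// rebuild it.
//
//   struct MyModuleImpl : torch::nn::Cloneable<MyModuleImpl> {
//     MyModuleImpl() { reset(); }
//     void reset() override {
//       weight = register_parameter("weight", torch::randn({3, 3}));
//     }
//     torch::Tensor weight;
//   };
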
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h
ADDED
@@ -0,0 +1,17 @@
#pragma once

#include <torch/nn/functional/batchnorm.h>
#include <torch/nn/functional/conv.h>
#include <torch/nn/functional/distance.h>
#include <torch/nn/functional/dropout.h>
#include <torch/nn/functional/embedding.h>
#include <torch/nn/functional/fold.h>
#include <torch/nn/functional/instancenorm.h>
#include <torch/nn/functional/linear.h>
#include <torch/nn/functional/loss.h>
#include <torch/nn/functional/normalization.h>
#include <torch/nn/functional/padding.h>
#include <torch/nn/functional/pixelshuffle.h>
#include <torch/nn/functional/pooling.h>
#include <torch/nn/functional/upsampling.h>
#include <torch/nn/functional/vision.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h
ADDED
@@ -0,0 +1,966 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/Dispatch.h>
|
4 |
+
#include <torch/nn/functional/dropout.h>
|
5 |
+
#include <torch/nn/functional/linear.h>
|
6 |
+
#include <torch/nn/options/activation.h>
|
7 |
+
#include <torch/nn/options/dropout.h>
|
8 |
+
#include <torch/nn/options/linear.h>
|
9 |
+
#include <torch/types.h>
|
10 |
+
#include <limits>
|
11 |
+
#include <utility>
|
12 |
+
|
13 |
+
namespace torch {
|
14 |
+
namespace nn {
|
15 |
+
namespace functional {
|
16 |
+
|
17 |
+
#ifndef DOXYGEN_SHOULD_SKIP_THIS
|
18 |
+
namespace detail {
|
19 |
+
inline Tensor elu(Tensor input, double alpha, bool inplace) {
|
20 |
+
if (inplace) {
|
21 |
+
return torch::elu_(input, alpha);
|
22 |
+
} else {
|
23 |
+
return torch::elu(input, alpha);
|
24 |
+
}
|
25 |
+
}
|
26 |
+
} // namespace detail
|
27 |
+
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
|
28 |
+
|
29 |
+
/// See
|
30 |
+
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.elu
|
31 |
+
/// about the exact behavior of this functional.
|
32 |
+
///
|
33 |
+
/// See the documentation for `torch::nn::functional::ELUFuncOptions` class to
|
34 |
+
/// learn what optional arguments are supported for this functional.
|
35 |
+
///
|
36 |
+
/// Example:
|
37 |
+
/// ```
|
38 |
+
/// namespace F = torch::nn::functional;
|
39 |
+
/// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
|
40 |
+
/// ```
|
41 |
+
inline Tensor elu(Tensor input, const ELUFuncOptions& options = {}) {
|
42 |
+
return detail::elu(std::move(input), options.alpha(), options.inplace());
|
43 |
+
}
|
44 |
+
|
45 |
+
// ============================================================================
|
46 |
+
|
47 |
+
#ifndef DOXYGEN_SHOULD_SKIP_THIS
|
48 |
+
namespace detail {
|
49 |
+
inline Tensor selu(Tensor input, bool inplace) {
|
50 |
+
if (inplace) {
|
51 |
+
return torch::selu_(input);
|
52 |
+
} else {
|
53 |
+
return torch::selu(input);
|
54 |
+
}
|
55 |
+
}
|
56 |
+
} // namespace detail
|
57 |
+
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
|
58 |
+
|
59 |
+
/// See
|
60 |
+
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.selu
|
61 |
+
/// about the exact behavior of this functional.
|
62 |
+
///
|
63 |
+
/// See the documentation for `torch::nn::functional::SELUFuncOptions` class to
|
64 |
+
/// learn what optional arguments are supported for this functional.
|
65 |
+
///
|
66 |
+
/// Example:
|
67 |
+
/// ```
|
68 |
+
/// namespace F = torch::nn::functional;
|
69 |
+
/// F::selu(input, F::SELUFuncOptions(false));
|
70 |
+
/// ```
|
71 |
+
inline Tensor selu(Tensor input, const SELUFuncOptions& options = {}) {
|
72 |
+
return detail::selu(std::move(input), options.inplace());
|
73 |
+
}
|
74 |
+
|
75 |
+
// ============================================================================
|
76 |
+
|
77 |
+
#ifndef DOXYGEN_SHOULD_SKIP_THIS
|
78 |
+
namespace detail {
|
79 |
+
inline Tensor hardshrink(const Tensor& input, double lambda) {
|
80 |
+
return torch::hardshrink(input, lambda);
|
81 |
+
}
|
82 |
+
} // namespace detail
|
83 |
+
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
|
84 |
+
|
85 |
+
/// See
|
86 |
+
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardshrink
|
87 |
+
/// about the exact behavior of this functional.
|
88 |
+
///
|
89 |
+
/// See the documentation for `torch::nn::functional::HardshrinkFuncOptions`
|
90 |
+
/// class to learn what optional arguments are supported for this functional.
|
91 |
+
///
|
92 |
+
/// Example:
|
93 |
+
/// ```
|
94 |
+
/// namespace F = torch::nn::functional;
|
95 |
+
/// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
|
96 |
+
/// ```
|
97 |
+
inline Tensor hardshrink(
|
98 |
+
const Tensor& input,
|
99 |
+
const HardshrinkFuncOptions& options = {}) {
|
100 |
+
return detail::hardshrink(input, options.lambda());
|
101 |
+
}
|
102 |
+
|
103 |
+
// ============================================================================
|
104 |
+
|
105 |
+
#ifndef DOXYGEN_SHOULD_SKIP_THIS
|
106 |
+
namespace detail {
|
107 |
+
inline Tensor hardtanh(
|
108 |
+
Tensor input,
|
109 |
+
double min_val,
|
110 |
+
double max_val,
|
111 |
+
bool inplace) {
|
112 |
+
if (inplace) {
|
113 |
+
return torch::hardtanh_(input, min_val, max_val);
|
114 |
+
} else {
|
115 |
+
return torch::hardtanh(input, min_val, max_val);
|
116 |
+
}
|
117 |
+
}
|
118 |
+
} // namespace detail
|
119 |
+
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
|
120 |
+
|
121 |
+
/// See
|
122 |
+
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardtanh
|
123 |
+
/// about the exact behavior of this functional.
|
124 |
+
///
|
125 |
+
/// See the documentation for `torch::nn::functional::HardtanhFuncOptions` class
|
126 |
+
/// to learn what optional arguments are supported for this functional.
|
127 |
+
///
|
128 |
+
/// Example:
|
129 |
+
/// ```
|
130 |
+
/// namespace F = torch::nn::functional;
|
131 |
+
/// F::hardtanh(x,
|
132 |
+
/// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
|
133 |
+
/// ```
|
134 |
+
inline Tensor hardtanh(Tensor input, const HardtanhFuncOptions& options = {}) {
|
135 |
+
return detail::hardtanh(
|
136 |
+
std::move(input),
|
137 |
+
options.min_val(),
|
138 |
+
options.max_val(),
|
139 |
+
options.inplace());
|
140 |
+
}
|
141 |
+
|
142 |
+
// ============================================================================
|
143 |
+
|
144 |
+
#ifndef DOXYGEN_SHOULD_SKIP_THIS
|
145 |
+
namespace detail {
|
146 |
+
inline Tensor leaky_relu(Tensor input, double negative_slope, bool inplace) {
|
147 |
+
if (inplace) {
|
148 |
+
return torch::leaky_relu_(input, negative_slope);
|
149 |
+
} else {
|
150 |
+
return torch::leaky_relu(input, negative_slope);
|
151 |
+
}
|
152 |
+
}
|
153 |
+
} // namespace detail
|
154 |
+
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
|
155 |
+
|
156 |
+
/// See
|
157 |
+
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.leaky_relu
|
158 |
+
/// about the exact behavior of this functional.
|
159 |
+
///
|
160 |
+
/// See the documentation for `torch::nn::functional::LeakyReLUFuncOptions`
|
161 |
+
/// class to learn what optional arguments are supported for this functional.
|
162 |
+
///
|
163 |
+
/// Example:
|
164 |
+
/// ```
|
165 |
+
/// namespace F = torch::nn::functional;
|
166 |
+
/// F::leaky_relu(x,
|
167 |
+
/// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
|
168 |
+
/// ```
|
169 |
+
inline Tensor leaky_relu(
|
170 |
+
Tensor input,
|
171 |
+
const LeakyReLUFuncOptions& options = {}) {
|
172 |
+
return detail::leaky_relu(
|
173 |
+
std::move(input), options.negative_slope(), options.inplace());
|
174 |
+
}
|
175 |
+
|
176 |
+
// ============================================================================
|
177 |
+
|
178 |
+
inline Tensor logsigmoid(const Tensor& input) {
  return torch::log_sigmoid(input);
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor gumbel_softmax(
    const Tensor& logits,
    double tau,
    bool hard,
    int dim) {
  auto gumbels =
      -torch::empty_like(logits).exponential_().log(); // ~Gumbel(0,1)
  gumbels = (logits + gumbels) / tau; // ~Gumbel(logits, tau)
  auto y_soft = gumbels.softmax(dim);

  torch::Tensor ret;
  if (hard) {
    // Straight through.
    auto index = std::get<1>(y_soft.max(dim, /*keepdim=*/true));
    auto y_hard = torch::zeros_like(logits).scatter_(dim, index, 1.0);
    ret = y_hard - y_soft.detach() + y_soft;
  } else {
    ret = y_soft;
  }
  return ret;
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.gumbel_softmax
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::GumbelSoftmaxFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1));
/// ```
inline Tensor gumbel_softmax(
    const Tensor& logits,
    const GumbelSoftmaxFuncOptions& options = {}) {
  return detail::gumbel_softmax(
      logits, options.tau(), options.hard(), options.dim());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor softmax(
    const Tensor& input,
    int64_t dim,
    c10::optional<torch::Dtype> dtype) {
  Tensor ret;

  if (dtype == c10::nullopt) {
    ret = input.softmax(dim);
  } else {
    ret = input.softmax(dim, dtype);
  }

  return ret;
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmax
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::SoftmaxFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::softmax(input, F::SoftmaxFuncOptions(1));
/// ```
inline Tensor softmax(const Tensor& input, const SoftmaxFuncOptions& options) {
  return detail::softmax(input, options.dim(), options.dtype());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor softmin(
    const Tensor& input,
    int64_t dim,
    c10::optional<torch::Dtype> dtype) {
  Tensor ret;

  if (dtype == c10::nullopt) {
    ret = (-input).softmax(dim);
  } else {
    ret = (-input).softmax(dim, dtype);
  }

  return ret;
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmin
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::SoftminFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::softmin(input, F::SoftminFuncOptions(1));
/// ```
inline Tensor softmin(const Tensor& input, const SoftminFuncOptions& options) {
  return detail::softmin(input, options.dim(), options.dtype());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor log_softmax(
    const Tensor& input,
    int64_t dim,
    c10::optional<torch::Dtype> dtype) {
  Tensor ret;

  if (dtype == c10::nullopt) {
    ret = input.log_softmax(dim);
  } else {
    ret = input.log_softmax(dim, dtype);
  }

  return ret;
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.log_softmax
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::LogSoftmaxFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::log_softmax(input, LogSoftmaxFuncOptions(1));
/// ```
inline Tensor log_softmax(
    const Tensor& input,
    const LogSoftmaxFuncOptions& options) {
  return detail::log_softmax(input, options.dim(), options.dtype());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor glu(const Tensor& input, int64_t dim) {
  TORCH_CHECK(
      input.dim() != 0,
      "glu does not support scalars because halving size must be even");
  return torch::glu(input, dim);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.glu
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::GLUFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::glu(input, GLUFuncOptions(1));
/// ```
inline Tensor glu(const Tensor& input, const GLUFuncOptions& options = {}) {
  return detail::glu(input, options.dim());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor gelu(const Tensor& input, string approximate) {
  return torch::gelu(input, approximate);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

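/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.gelu
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::GELUFuncOptions` class to
/// learn what optional arguments are supported for this functional.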
inline Tensor gelu(const Tensor& input, const GELUFuncOptions& options = {}) {
  return detail::gelu(input, options.approximate());
}

// ============================================================================

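/// Applies the Sigmoid Linear Unit (SiLU) function element-wise:
/// silu(x) = x * sigmoid(x).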
inline Tensor silu(const Tensor& input) {
  return torch::silu(input);
}

// ============================================================================

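/// Applies the Mish function element-wise:
/// mish(x) = x * tanh(softplus(x)).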
inline Tensor mish(const Tensor& input) {
  return torch::mish(input);
}

// ============================================================================

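/// Applies the parametric ReLU element-wise:
/// prelu(x) = max(0, x) + weight * min(0, x), where `weight` is a learnable
/// per-channel (or single-element) coefficient.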
inline Tensor prelu(const Tensor& input, const Tensor& weight) {
  return torch::prelu(input, weight);
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor relu(Tensor input, bool inplace) {
  if (inplace) {
    return torch::relu_(input);
  } else {
    return torch::relu(input);
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::ReLUFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::relu(x, F::ReLUFuncOptions().inplace(true));
/// ```
inline Tensor relu(Tensor input, const ReLUFuncOptions& options = {}) {
  return detail::relu(std::move(input), options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor relu6(Tensor input, bool inplace) {
  if (inplace) {
    return torch::relu6_(input);
  } else {
    return torch::relu6(input);
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu6
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::ReLU6FuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::relu6(x, F::ReLU6FuncOptions().inplace(true));
/// ```
inline Tensor relu6(Tensor input, const ReLU6FuncOptions& options = {}) {
  return detail::relu6(std::move(input), options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor rrelu(
    Tensor input,
    double lower,
    double upper,
    bool training,
    bool inplace) {
  if (inplace) {
    return torch::rrelu_(input, lower, upper, training);
  } else {
    return torch::rrelu(input, lower, upper, training);
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.rrelu
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::RReLUFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true));
/// ```
inline Tensor rrelu(Tensor input, const RReLUFuncOptions& options = {}) {
  return detail::rrelu(
      std::move(input),
      options.lower(),
      options.upper(),
      options.training(),
      options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor celu(Tensor input, double alpha, bool inplace) {
  if (inplace) {
    return torch::celu_(input, alpha);
  } else {
    return torch::celu(input, alpha);
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.celu
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::CELUFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
/// ```
inline Tensor celu(Tensor input, const CELUFuncOptions& options = {}) {
  return detail::celu(std::move(input), options.alpha(), options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor softplus(const Tensor& input, double beta, double threshold) {
  return torch::softplus(input, beta, threshold);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softplus
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::SoftplusFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
/// ```
inline Tensor softplus(
    const Tensor& input,
    const SoftplusFuncOptions& options = {}) {
  return detail::softplus(input, options.beta(), options.threshold());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor softshrink(const Tensor& input, double lambda) {
  return torch::softshrink(input, lambda);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softshrink
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::SoftshrinkFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
/// ```
inline Tensor softshrink(
    const Tensor& input,
    const SoftshrinkFuncOptions& options = {}) {
  return detail::softshrink(input, options.lambda());
}

// ============================================================================

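/// Applies the softsign function element-wise: softsign(x) = x / (1 + |x|).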
inline Tensor softsign(const Tensor& input) {
  return input / (input.abs() + 1);
}

// ============================================================================

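/// Applies the tanhshrink function element-wise: tanhshrink(x) = x - tanh(x).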
inline Tensor tanhshrink(const Tensor& input) {
  return input - input.tanh();
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor threshold(
    Tensor input,
    double threshold,
    double value,
    bool inplace) {
  if (inplace) {
    return torch::threshold_(input, threshold, value);
  } else {
    return torch::threshold(input, threshold, value);
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.threshold
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::ThresholdFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
/// ```
inline Tensor threshold(Tensor input, const ThresholdFuncOptions& options) {
  return detail::threshold(
      std::move(input),
      options.threshold(),
      options.value(),
      options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> multi_head_attention_forward(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    int64_t embed_dim_to_check,
    int64_t num_heads,
    const Tensor& in_proj_weight,
    const Tensor& in_proj_bias,
    const Tensor& bias_k,
    const Tensor& bias_v,
    bool add_zero_attn,
    double dropout_p,
    const Tensor& out_proj_weight,
    const Tensor& out_proj_bias,
    bool training = true,
    const Tensor& key_padding_mask = {},
    bool need_weights = true,
    const Tensor& attn_mask = {},
    bool use_separate_proj_weight = false,
    const Tensor& q_proj_weight = {},
    const Tensor& k_proj_weight = {},
    const Tensor& v_proj_weight = {},
    const Tensor& static_k = {},
    const Tensor& static_v = {},
    bool average_attn_weights = true) {
  namespace F = torch::nn::functional;

  const auto query_sizes = query.sizes();
  const auto& tgt_len = query_sizes[0];
  const auto& bsz = query_sizes[1];
  const auto& embed_dim = query_sizes[2];
  TORCH_INTERNAL_ASSERT(embed_dim == embed_dim_to_check);
  TORCH_INTERNAL_ASSERT(key.sizes() == value.sizes());

  const auto head_dim = embed_dim / num_heads;
  TORCH_CHECK(
      head_dim * num_heads == embed_dim,
      "embed_dim must be divisible by num_heads");
  const auto scaling = 1 / std::sqrt(head_dim);

  Tensor q, k, v;
  if (!use_separate_proj_weight) {
    if (torch::equal(query, key) && torch::equal(key, value)) {
      // self-attention
      const auto chunks =
          F::linear(query, in_proj_weight, in_proj_bias).chunk(3, /*dim=*/-1);
      q = chunks[0];
      k = chunks[1];
      v = chunks[2];
    } else if (torch::equal(key, value)) {
      // encoder-decoder attention
      // This is inline in_proj function with in_proj_weight and in_proj_bias
      auto _b = in_proj_bias;
      auto _start = 0;
      auto _end = embed_dim;
      auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end);
      if (_b.defined()) {
        _b = _b.slice(/*dim=*/0, _start, _end);
      }
      q = F::linear(query, _w, _b);

      if (!key.defined()) {
        TORCH_INTERNAL_ASSERT(!value.defined());
        k.reset();
        v.reset();
      } else {
        // This is inline in_proj function with in_proj_weight and in_proj_bias
        _b = in_proj_bias;
        _start = embed_dim;
        _w = in_proj_weight.slice(/*dim=*/0, _start);
        if (_b.defined()) {
          _b = _b.slice(/*dim=*/0, _start);
        }
        const auto chunks = F::linear(key, _w, _b).chunk(2, /*dim=*/-1);
        k = chunks[0];
        v = chunks[1];
      }
    } else {
      // This is inline in_proj function with in_proj_weight and in_proj_bias
      auto _b = in_proj_bias;
      auto _start = 0;
      auto _end = embed_dim;
      auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end);
      if (_b.defined()) {
        _b = _b.slice(/*dim=*/0, _start, _end);
      }
      q = F::linear(query, _w, _b);

      // This is inline in_proj function with in_proj_weight and in_proj_bias
      _b = in_proj_bias;
      _start = embed_dim;
      _end = embed_dim * 2;
      _w = in_proj_weight.slice(/*dim=*/0, _start, _end);
      if (_b.defined()) {
        _b = _b.slice(/*dim=*/0, _start, _end);
      }
      k = F::linear(key, _w, _b);

      // This is inline in_proj function with in_proj_weight and in_proj_bias
      _b = in_proj_bias;
      _start = embed_dim * 2;
      _w = in_proj_weight.slice(/*dim=*/0, _start);
      if (_b.defined()) {
        _b = _b.slice(0, _start);
      }
      v = F::linear(value, _w, _b);
    }
  } else {
    const auto& q_proj_weight_non_opt = q_proj_weight;
    {
      const auto sizes = q_proj_weight_non_opt.sizes();
      const auto len1 = sizes[0];
      const auto len2 = sizes[1];
      TORCH_CHECK(len1 == embed_dim && len2 == query.size(-1));
    }

    const auto& k_proj_weight_non_opt = k_proj_weight;
    {
      const auto sizes = k_proj_weight_non_opt.sizes();
      const auto len1 = sizes[0];
      const auto len2 = sizes[1];
      TORCH_CHECK(len1 == embed_dim && len2 == key.size(-1));
    }

    const auto& v_proj_weight_non_opt = v_proj_weight;
    {
      const auto sizes = v_proj_weight_non_opt.sizes();
      const auto len1 = sizes[0];
      const auto len2 = sizes[1];
      TORCH_CHECK(len1 == embed_dim && len2 == value.size(-1));
    }

    if (in_proj_bias.defined()) {
      q = F::linear(
          query,
          q_proj_weight_non_opt,
          in_proj_bias.slice(/*dim=*/0, 0, embed_dim));
      k = F::linear(
          key,
          k_proj_weight_non_opt,
          in_proj_bias.slice(/*dim=*/0, embed_dim, (embed_dim * 2)));
      v = F::linear(
          value,
          v_proj_weight_non_opt,
          in_proj_bias.slice(/*dim=*/0, (embed_dim * 2)));
    } else {
      q = F::linear(query, q_proj_weight_non_opt, in_proj_bias);
      k = F::linear(key, k_proj_weight_non_opt, in_proj_bias);
      v = F::linear(value, v_proj_weight_non_opt, in_proj_bias);
    }
  }
  q = q * scaling;
  Tensor attn_mask_ = attn_mask;
  Tensor key_padding_mask_ = key_padding_mask;
  if (bias_k.defined() && bias_v.defined()) {
    if (!static_k.defined() && !static_v.defined()) {
      k = torch::cat({k, bias_k.repeat({1, bsz, 1})});
      v = torch::cat({v, bias_v.repeat({1, bsz, 1})});
      if (attn_mask_.defined()) {
        attn_mask_ = torch::cat(
            {attn_mask_,
             torch::zeros(
                 {attn_mask_.size(0), 1},
                 at::TensorOptions(attn_mask_.dtype())
                     .device(attn_mask_.device()))},
            /*dim=*/1);
      }
      if (key_padding_mask_.defined()) {
        key_padding_mask_ = torch::cat(
            {key_padding_mask_,
             torch::zeros(
                 {key_padding_mask_.size(0), 1},
                 at::TensorOptions(key_padding_mask_.dtype())
                     .device(key_padding_mask_.device()))},
            /*dim=*/1);
      }
    } else {
      TORCH_CHECK(!static_k.defined(), "bias cannot be added to static key.");
      TORCH_CHECK(!static_v.defined(), "bias cannot be added to static value.");
    }
  } else {
    TORCH_CHECK(!bias_k.defined());
    TORCH_CHECK(!bias_v.defined());
  }
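  // Fold the head dimension into the batch dimension:
  // (seq_len, bsz, embed_dim) -> (bsz * num_heads, seq_len, head_dim),
  // so the attention scores below can be computed with a single bmm.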
  q = q.contiguous().view({tgt_len, bsz * num_heads, head_dim}).transpose(0, 1);
  if (k.defined()) {
    k = k.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1);
  }
  if (v.defined()) {
    v = v.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1);
  }
  if (static_k.defined()) {
    TORCH_CHECK(static_k.size(0) == bsz * num_heads);
    TORCH_CHECK(static_k.size(2) == head_dim);
    k = static_k;
  }
  if (static_v.defined()) {
    TORCH_CHECK(static_v.size(0) == bsz * num_heads);
    TORCH_CHECK(static_v.size(2) == head_dim);
    v = static_v;
  }
  auto src_len = k.size(1);
  if (key_padding_mask_.defined()) {
    TORCH_CHECK(key_padding_mask_.size(0) == bsz);
    TORCH_CHECK(key_padding_mask_.size(1) == src_len);
  }
  if (add_zero_attn) {
    src_len += 1;
    auto k_sizes = k.sizes().vec();
    k_sizes[1] = 1;
    k = torch::cat(
        {k,
         torch::zeros(
             k_sizes, at::TensorOptions(k.dtype()).device(k.device()))},
        /*dim=*/1);
    auto v_sizes = v.sizes().vec();
    v_sizes[1] = 1;
    v = torch::cat(
        {v,
         torch::zeros(
             v_sizes, at::TensorOptions(v.dtype()).device(v.device()))},
        /*dim=*/1);
    if (attn_mask_.defined()) {
      attn_mask_ = torch::cat(
          {attn_mask_,
           torch::zeros(
               {attn_mask_.size(0), 1},
               at::TensorOptions(attn_mask_.dtype())
                   .device(attn_mask_.device()))},
          /*dim=*/1);
    }
    if (key_padding_mask_.defined()) {
      key_padding_mask_ = torch::cat(
          {key_padding_mask_,
           torch::zeros(
               {key_padding_mask_.size(0), 1},
               at::TensorOptions(key_padding_mask_.dtype())
                   .device(key_padding_mask_.device()))},
          /*dim=*/1);
    }
  }
  auto attn_output_weights = torch::bmm(q, k.transpose(1, 2));
  TORCH_CHECK(
      attn_output_weights.sizes() ==
      IntArrayRef({bsz * num_heads, tgt_len, src_len}));
  if (attn_mask_.defined()) {
    attn_mask_ = attn_mask_.unsqueeze(0);
    attn_output_weights += attn_mask_;
  }
  if (key_padding_mask_.defined()) {
    attn_output_weights =
        attn_output_weights.view({bsz, num_heads, tgt_len, src_len});
    attn_output_weights = AT_DISPATCH_FLOATING_TYPES(
        attn_output_weights.scalar_type(),
        "attn_output_weights.masked_fill",
        [&]() {
          return attn_output_weights.masked_fill(
              key_padding_mask_.unsqueeze(1).unsqueeze(2),
              -std::numeric_limits<scalar_t>::infinity());
        });
    attn_output_weights =
        attn_output_weights.view({bsz * num_heads, tgt_len, src_len});
  }
  // NOLINTNEXTLINE(bugprone-argument-comment)
  attn_output_weights = F::softmax(attn_output_weights, /*dim=*/-1);
  attn_output_weights = F::dropout(
      attn_output_weights,
      F::DropoutFuncOptions().p(dropout_p).training(training));
  auto attn_output = torch::bmm(attn_output_weights, v);
  TORCH_CHECK(
      attn_output.sizes() == IntArrayRef({bsz * num_heads, tgt_len, head_dim}));
  attn_output =
      attn_output.transpose(0, 1).contiguous().view({tgt_len, bsz, embed_dim});
  attn_output = F::linear(attn_output, out_proj_weight, out_proj_bias);
  if (need_weights) {
    attn_output_weights =
        attn_output_weights.view({bsz, num_heads, tgt_len, src_len});
    if (average_attn_weights) {
      // average attention weights over heads
      attn_output_weights = attn_output_weights.sum(/*dim=*/1) / num_heads;
    }
    return std::make_tuple(attn_output, attn_output_weights);
  } else {
    return std::make_tuple(attn_output, Tensor());
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

inline std::tuple<Tensor, Tensor> multi_head_attention_forward(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const MultiheadAttentionForwardFuncOptions& options) {
  return detail::multi_head_attention_forward(
      query,
      key,
      value,
      options.embed_dim_to_check(),
      options.num_heads(),
      options.in_proj_weight(),
      options.in_proj_bias(),
      options.bias_k(),
      options.bias_v(),
      options.add_zero_attn(),
      options.dropout_p(),
      options.out_proj_weight(),
      options.out_proj_bias(),
      options.training(),
      options.key_padding_mask(),
      options.need_weights(),
      options.attn_mask(),
      options.use_separate_proj_weight(),
      options.q_proj_weight(),
      options.k_proj_weight(),
      options.v_proj_weight(),
      options.static_k(),
      options.static_v(),
      options.average_attn_weights());
}

} // namespace functional
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h
ADDED
@@ -0,0 +1,83 @@
#pragma once

#include <c10/util/irange.h>
#include <torch/nn/options/batchnorm.h>
#include <torch/types.h>

namespace torch {
namespace nn {
namespace functional {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor batch_norm(
    const Tensor& input,
    const Tensor& running_mean,
    const Tensor& running_var,
    Tensor weight,
    Tensor bias,
    bool training,
    c10::optional<double> momentum,
    double eps) {
  TORCH_CHECK(
      input.dim() >= 2,
      "Expected at least 2 input dimensions, but got ",
      input.dim());
  if (training) {
    auto size = input.sizes();
    int64_t size_prods = size[0];
    for (const auto i : c10::irange(size.size() - 2)) {
      size_prods *= size[i + 2];
    }
    TORCH_CHECK(
        size_prods != 1,
        "Expected more than 1 value per channel when training, got input size ",
        size);
  }

  return torch::batch_norm(
      input,
      weight,
      bias,
      running_mean,
      running_var,
      training,
      momentum.value(),
      eps,
      at::globalContext().userEnabledCuDNN());
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.batch_norm
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::BatchNormFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::batch_norm(input, mean, variance,
/// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false));
/// ```
inline Tensor batch_norm(
    const Tensor& input,
    const Tensor& running_mean,
    const Tensor& running_var,
    const BatchNormFuncOptions& options = {}) {
  return detail::batch_norm(
      input,
      running_mean,
      running_var,
      options.weight(),
      options.bias(),
      options.training(),
      options.momentum(),
      options.eps());
}

} // namespace functional
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h
ADDED
@@ -0,0 +1,88 @@
#pragma once

#include <torch/nn/options/distance.h>

namespace torch {
namespace nn {
namespace functional {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor cosine_similarity(
    const Tensor& x1,
    const Tensor& x2,
    int64_t dim,
    double eps) {
  return torch::cosine_similarity(x1, x2, dim, eps);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_similarity
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::CosineSimilarityFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::cosine_similarity(input1, input2,
/// F::CosineSimilarityFuncOptions().dim(1));
/// ```
inline Tensor cosine_similarity(
    const Tensor& x1,
    const Tensor& x2,
    const CosineSimilarityFuncOptions& options = {}) {
  return detail::cosine_similarity(x1, x2, options.dim(), options.eps());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor pairwise_distance(
    const Tensor& x1,
    const Tensor& x2,
    double p,
    double eps,
    bool keepdim) {
  return torch::pairwise_distance(x1, x2, p, eps, keepdim);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pairwise_distance
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::PairwiseDistanceFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1));
/// ```
inline Tensor pairwise_distance(
    const Tensor& x1,
    const Tensor& x2,
    const PairwiseDistanceFuncOptions& options = {}) {
  return detail::pairwise_distance(
      x1, x2, options.p(), options.eps(), options.keepdim());
}

// ============================================================================

/// Computes the p-norm distance between every pair of row vectors in the input.
/// This function will be faster if the rows are contiguous.
inline Tensor pdist(const Tensor& input, double p = 2.0) {
  return torch::pdist(input, p);
}

} // namespace functional
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h
ADDED
@@ -0,0 +1,234 @@
#pragma once

#include <torch/nn/options/dropout.h>

#include <utility>

namespace torch {
namespace nn {
namespace functional {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {

inline Tensor dropout(Tensor input, double p, bool training, bool inplace) {
  TORCH_CHECK(
      p >= 0. && p <= 1.,
      "dropout probability has to be between 0 and 1, but got ",
      p);
  if (inplace) {
    return torch::dropout_(input, p, training);
  } else {
    return torch::dropout(input, p, training);
  }
}

} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::DropoutFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::dropout(input, F::DropoutFuncOptions().p(0.5));
/// ```
inline Tensor dropout(Tensor input, const DropoutFuncOptions& options = {}) {
  return detail::dropout(
      std::move(input), options.p(), options.training(), options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {

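// Shared implementation of dropout2d/dropout3d: if the input is unbatched
// (i.e. has `unbatched_dim` dimensions), a leading batch dimension is added
// before calling feature dropout and removed again afterwards.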
template <int64_t unbatched_dim, int64_t batched_dim>
inline Tensor _dropoutNd_helper(
    Tensor input,
    double p,
    bool training,
    bool inplace,
    const char* fn_name) {
  TORCH_CHECK(
      p >= 0. && p <= 1.,
      "dropout probability has to be between 0 and 1, but got ",
      p);

  auto inp_dim = input.dim();
  auto is_batched = inp_dim == batched_dim;
  if (!is_batched) {
    if (inplace) {
      input = input.unsqueeze_(0);
    } else {
      input = input.unsqueeze(0);
    }
  }

  Tensor result;
  if (inplace) {
    result = torch::feature_dropout_(input, p, training);
  } else {
    result = torch::feature_dropout(input, p, training);
  }

  if (!is_batched) {
    if (inplace) {
      result = result.squeeze_(0);
    } else {
      result = result.squeeze(0);
    }
  }
  return result;
}

inline Tensor dropout2d(Tensor input, double p, bool training, bool inplace) {
  return _dropoutNd_helper<3, 4>(
      std::move(input), p, training, inplace, "dropout2d");
}

} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout2d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::Dropout2dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5));
/// ```
inline Tensor dropout2d(
    Tensor input,
    const Dropout2dFuncOptions& options = {}) {
  return detail::dropout2d(
      std::move(input), options.p(), options.training(), options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {

inline Tensor dropout3d(Tensor input, double p, bool training, bool inplace) {
  return _dropoutNd_helper<4, 5>(
      std::move(input), p, training, inplace, "dropout3d");
}

} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout3d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::Dropout3dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5));
/// ```
inline Tensor dropout3d(
    Tensor input,
    const Dropout3dFuncOptions& options = {}) {
  return detail::dropout3d(
      std::move(input), options.p(), options.training(), options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {

inline Tensor alpha_dropout(
    Tensor input,
    double p,
    bool training,
    bool inplace) {
  if (p < 0. || p > 1.) {
    TORCH_CHECK(
        false, "dropout probability has to be between 0 and 1, but got ", p);
  }
  return inplace ? torch::alpha_dropout_(input, p, training)
                 : torch::alpha_dropout(input, p, training);
}

} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.alpha_dropout
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::AlphaDropoutFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::alpha_dropout(input,
/// F::AlphaDropoutFuncOptions().p(0.5).training(false));
/// ```
inline Tensor alpha_dropout(
    Tensor input,
    const AlphaDropoutFuncOptions& options = {}) {
  return detail::alpha_dropout(
      std::move(input), options.p(), options.training(), options.inplace());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {

inline Tensor feature_alpha_dropout(
    Tensor input,
    double p,
    bool training,
    bool inplace) {
  if (p < 0. || p > 1.) {
    TORCH_CHECK(
        false, "dropout probability has to be between 0 and 1, but got ", p);
  }
  return inplace ? torch::feature_alpha_dropout_(input, p, training)
                 : torch::feature_alpha_dropout(input, p, training);
}

} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.feature_alpha_dropout
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::FeatureAlphaDropoutFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::feature_alpha_dropout(input,
/// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false));
/// ```
inline Tensor feature_alpha_dropout(
    Tensor input,
    const FeatureAlphaDropoutFuncOptions& options = {}) {
  return detail::feature_alpha_dropout(
      std::move(input), options.p(), options.training(), options.inplace());
}

} // namespace functional
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h
ADDED
@@ -0,0 +1,211 @@
#pragma once

#include <torch/nn/options/embedding.h>

namespace torch {
namespace nn {
namespace functional {

inline Tensor one_hot(const Tensor& tensor, int64_t num_classes = -1) {
  return torch::one_hot(tensor, num_classes);
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline void _no_grad_embedding_renorm_(
    Tensor weight,
    const Tensor& input,
    float max_norm,
    float norm_type) {
  torch::NoGradGuard no_grad;
  torch::embedding_renorm_(weight, input, max_norm, norm_type);
}

inline Tensor embedding(
    const Tensor& input,
    const Tensor& weight,
    c10::optional<int64_t> padding_idx,
    c10::optional<double> max_norm,
    double norm_type,
    bool scale_grad_by_freq,
    bool sparse) {
  auto input_ = input;

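  // Validate padding_idx against the number of embeddings and convert a
  // negative padding_idx to its non-negative equivalent; -1 means "no padding".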
  if (padding_idx != c10::nullopt) {
    if (*padding_idx > 0) {
      TORCH_CHECK(
          *padding_idx < weight.size(0),
          "Padding_idx must be within num_embeddings");
    } else if (*padding_idx < 0) {
      TORCH_CHECK(
          *padding_idx >= -weight.size(0),
          "Padding_idx must be within num_embeddings");
      padding_idx = weight.size(0) + *padding_idx;
    }
  } else {
    padding_idx = -1;
  }

  if (max_norm != c10::nullopt) {
    input_ = input_.contiguous();
    // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
    _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type);
  }
  return torch::embedding(
      weight, input_, *padding_idx, scale_grad_by_freq, sparse);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::EmbeddingFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::embedding(input, weight,
/// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true));
/// ```
inline Tensor embedding(
    const Tensor& input,
    const Tensor& weight,
    const EmbeddingFuncOptions& options = {}) {
  return detail::embedding(
      input,
      weight,
      options.padding_idx(),
      options.max_norm(),
      options.norm_type(),
      options.scale_grad_by_freq(),
      options.sparse());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor embedding_bag(
    const Tensor& input,
    const Tensor& weight,
    const Tensor& offsets,
    c10::optional<double> max_norm,
    double norm_type,
    bool scale_grad_by_freq,
    EmbeddingBagMode mode,
    bool sparse,
    const Tensor& per_sample_weights,
    bool include_last_offset,
    c10::optional<int64_t> padding_idx) {
  auto input_ = input;
  auto offsets_ = offsets;
  auto per_sample_weights_ = per_sample_weights;
  TORCH_CHECK(
      !per_sample_weights_.defined() ||
          input_.sizes() == per_sample_weights_.sizes(),
      "embedding_bag: If per_sample_weights (",
      per_sample_weights_.sizes(),
      ") is not null, then it must have the same shape as the input (",
      input_.sizes(),
      ")");
  if (input_.dim() == 2) {
    TORCH_CHECK(
        !offsets_.defined(),
        "If input is 2D, then offsets has to be null, as input is treated as a mini-batch of fixed length sequences. However, found offsets of type Tensor");
    offsets_ = torch::arange(
        0,
        input_.numel(),
        input_.size(1),
        torch::TensorOptions().dtype(torch::kLong).device(input_.device()));
    input_ = input_.reshape(-1);
    if (per_sample_weights_.defined()) {
      per_sample_weights_ = per_sample_weights_.reshape(-1);
    }
  } else if (input_.dim() == 1) {
    TORCH_CHECK(
        offsets_.defined(), "offsets has to be a 1D Tensor but got null");
    TORCH_CHECK(offsets_.dim() == 1, "offsets has to be a 1D Tensor");
  } else {
    TORCH_CHECK(
        false,
        "input has to be 1D or 2D Tensor, but got Tensor of dimension ",
        input_.dim());
  }

  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  int mode_enum;
  if (std::holds_alternative<enumtype::kSum>(mode)) {
    mode_enum = 0;
  } else if (std::holds_alternative<enumtype::kMean>(mode)) {
    mode_enum = 1;
  } else if (std::holds_alternative<enumtype::kMax>(mode)) {
    mode_enum = 2;
    TORCH_CHECK(
        !scale_grad_by_freq,
        "max mode does not support scaling the gradient by the frequency");
    TORCH_CHECK(!sparse, "max mode does not support sparse weights");
  } else {
    TORCH_CHECK(false, "mode has to be one of sum, mean or max");
  }

  if (max_norm != c10::nullopt) {
    // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
    _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type);
  }

  TORCH_CHECK(
      !per_sample_weights_.defined() || std::get_if<enumtype::kSum>(&mode),
      "embedding_bag: per_sample_weights was not null. ",
      "per_sample_weights is only supported for mode='kSum' (got mode='",
      torch::enumtype::get_enum_name(mode),
      "'). Please open a feature request on GitHub.");

  return std::get<0>(torch::embedding_bag(
      weight,
      input_,
      offsets_,
      scale_grad_by_freq,
      mode_enum,
      sparse,
      per_sample_weights_,
      include_last_offset,
      padding_idx));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding_bag
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::EmbeddingBagFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::embedding_bag(input, weight,
/// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));
/// ```
inline Tensor embedding_bag(
    const Tensor& input,
    const Tensor& weight,
    const EmbeddingBagFuncOptions& options = {}) {
  return detail::embedding_bag(
      input,
      weight,
      options.offsets(),
      options.max_norm(),
      options.norm_type(),
      options.scale_grad_by_freq(),
      options.mode(),
      options.sparse(),
      options.per_sample_weights(),
      options.include_last_offset(),
      options.padding_idx());
}

} // namespace functional
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h
ADDED
@@ -0,0 +1,102 @@
#pragma once

#include <torch/nn/options/fold.h>

namespace torch {
namespace nn {
namespace functional {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor fold(
    const Tensor& input,
    ExpandingArray<2> output_size,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> dilation,
    ExpandingArray<2> padding,
    ExpandingArray<2> stride) {
  if (input.dim() == 3 || input.dim() == 2) {
    return torch::col2im(
        input, output_size, kernel_size, dilation, padding, stride);
  } else {
    TORCH_CHECK(
        false,
        "Input Error: Only unbatched (2D) or batched (3D) input Tensors are supported "
        "(got ",
        input.dim(),
        "D)");
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.fold
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::FoldFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2}));
/// ```
inline Tensor fold(const Tensor& input, const FoldFuncOptions& options) {
  return detail::fold(
      input,
      options.output_size(),
      options.kernel_size(),
      options.dilation(),
      options.padding(),
      options.stride());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor unfold(
    const Tensor& input,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> dilation,
    ExpandingArray<2> padding,
    ExpandingArray<2> stride) {
  if (input.dim() == 4) {
    return torch::im2col(input, kernel_size, dilation, padding, stride);
  } else {
    TORCH_CHECK(
        false,
        "Input Error: Only 4D input Tensors are supported "
        "(got ",
        input.dim(),
        "D)");
  }
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.unfold
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::UnfoldFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2));
/// ```
inline Tensor unfold(const Tensor& input, const UnfoldFuncOptions& options) {
  return detail::unfold(
      input,
      options.kernel_size(),
      options.dilation(),
      options.padding(),
      options.stride());
}

} // namespace functional
} // namespace nn
} // namespace torch
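A minimal round-trip sketch for the fold/unfold functionals above (not part of the uploaded file); the 4x4 input, 2x2 patches, and stride are illustrative assumptions. With non-overlapping patches, fold reassembles exactly what unfold extracted.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  // Hypothetical 1x1x4x4 image: extract 2x2 patches with stride 2, then
  // reassemble them with fold.
  auto x = torch::arange(16, torch::kFloat).reshape({1, 1, 4, 4});
  auto patches = F::unfold(x, F::UnfoldFuncOptions({2, 2}).stride(2));
  std::cout << patches.sizes() << std::endl; // [1, 4, 4]
  auto y = F::fold(
      patches,
      F::FoldFuncOptions(/*output_size=*/{4, 4}, /*kernel_size=*/{2, 2})
          .stride(2));
  std::cout << torch::allclose(x, y) << std::endl; // 1
}
```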
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h
ADDED
@@ -0,0 +1,63 @@
#pragma once

#include <torch/nn/options/instancenorm.h>

namespace torch {
namespace nn {
namespace functional {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor instance_norm(
    const Tensor& input,
    const Tensor& running_mean,
    const Tensor& running_var,
    const Tensor& weight,
    const Tensor& bias,
    bool use_input_stats,
    double momentum,
    double eps) {
  return torch::instance_norm(
      input,
      weight,
      bias,
      running_mean,
      running_var,
      use_input_stats,
      momentum,
      eps,
      at::globalContext().userEnabledCuDNN());
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.instance_norm
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::InstanceNormFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::instance_norm(input,
/// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5));
/// ```
inline Tensor instance_norm(
    const Tensor& input,
    const InstanceNormFuncOptions& options = {}) {
  return detail::instance_norm(
      input,
      options.running_mean(),
      options.running_var(),
      options.weight(),
      options.bias(),
      options.use_input_stats(),
      options.momentum(),
      options.eps());
}

} // namespace functional
} // namespace nn
} // namespace torch
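A minimal usage sketch for `F::instance_norm` (not part of the uploaded file); the batch shape and the assumption that `use_input_stats` defaults to true are illustrative.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  // Hypothetical batch of 4 two-channel feature maps; statistics are taken
  // from the input itself, per sample and per channel.
  auto x = torch::randn({4, 2, 8, 8});
  auto y = F::instance_norm(x, F::InstanceNormFuncOptions().eps(1e-5));
  // Each (sample, channel) slice is now roughly zero-mean.
  std::cout << y.mean({2, 3}).abs().max().item<float>() << std::endl;
}
```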
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h
ADDED
@@ -0,0 +1,37 @@
#pragma once

#include <torch/types.h>

namespace torch {
namespace nn {
namespace functional {

inline Tensor bilinear(
    const Tensor& input1,
    const Tensor& input2,
    const Tensor& weight,
    const Tensor& bias = Tensor()) {
  return torch::bilinear(input1, input2, weight, bias);
}

// ============================================================================

inline Tensor linear(
    const Tensor& input,
    const Tensor& weight,
    const Tensor& bias = {}) {
  if (input.dim() == 2 && bias.defined()) {
    // fused op is marginally faster
    return torch::addmm(bias, input, weight.t());
  } else {
    auto output = input.matmul(weight.t());
    if (bias.defined()) {
      output += bias;
    }
    return output;
  }
}

} // namespace functional
} // namespace nn
} // namespace torch
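A minimal usage sketch for `F::linear` (not part of the uploaded file); the feature sizes are illustrative assumptions. The 2-D input with a defined bias exercises the fused `addmm` branch above.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  // Hypothetical shapes: batch of 8 inputs with 16 features, projected to 4.
  auto input = torch::randn({8, 16});
  auto weight = torch::randn({4, 16}); // [out_features, in_features]
  auto bias = torch::randn({4});
  auto out = F::linear(input, weight, bias);
  std::cout << out.sizes() << std::endl; // [8, 4]
}
```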
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h
ADDED
@@ -0,0 +1,1044 @@
#pragma once

#include <ATen/ExpandUtils.h>
#include <torch/nn/functional/activation.h>
#include <torch/nn/options/loss.h>

namespace torch {
namespace nn {
namespace functional {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor l1_loss(
    const Tensor& input,
    const Tensor& target,
    L1LossFuncOptions::reduction_t reduction) {
  return torch::l1_loss(input, target, enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.l1_loss
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::L1LossFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
/// ```
inline Tensor l1_loss(
    const Tensor& input,
    const Tensor& target,
    const L1LossFuncOptions& options = {}) {
  return detail::l1_loss(input, target, options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor kl_div(
    const Tensor& input,
    const Tensor& target,
    KLDivFuncOptions::reduction_t reduction,
    bool log_target = false) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  torch::Reduction::Reduction reduction_enum;

  if (std::holds_alternative<enumtype::kMean>(reduction)) {
    TORCH_WARN(
        "reduction: 'mean' divides the total loss by both the batch size and the support size."
        "'batchmean' divides only by the batch size, and aligns with the KL div math definition."
        "'mean' will be changed to behave the same as 'batchmean' in the next major release.");
  }

  // special case for batchmean
  if (std::holds_alternative<enumtype::kBatchMean>(reduction)) {
    reduction_enum = torch::Reduction::Sum;
  } else {
    reduction_enum = enumtype::reduction_get_enum(reduction);
  }

  auto reduced = torch::kl_div(input, target, reduction_enum, log_target);

  if (std::holds_alternative<enumtype::kBatchMean>(reduction) &&
      input.dim() != 0) {
    reduced = reduced / input.sizes()[0];
  }

  return reduced;
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.kl_div
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::KLDivFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::kl_div(input, target,
/// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
/// ```
inline Tensor kl_div(
    const Tensor& input,
    const Tensor& target,
    const KLDivFuncOptions& options = {}) {
  return detail::kl_div(
      input, target, options.reduction(), options.log_target());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor mse_loss(
    const Tensor& input,
    const Tensor& target,
    MSELossFuncOptions::reduction_t reduction) {
  if (!(target.sizes() == input.sizes())) {
    TORCH_WARN(
        "Using a target size (",
        target.sizes(),
        ") that is different to the input size (",
        input.sizes(),
        "). ",
        "This will likely lead to incorrect results due to broadcasting. ",
        "Please ensure they have the same size.");
  }
  std::vector<torch::Tensor> broadcast_tensors =
      torch::broadcast_tensors({input, target});
  auto expanded_input = broadcast_tensors[0];
  auto expanded_target = broadcast_tensors[1];
  return torch::mse_loss(
      expanded_input, expanded_target, enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.mse_loss
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::MSELossFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
/// ```
inline Tensor mse_loss(
    const Tensor& input,
    const Tensor& target,
    const MSELossFuncOptions& options = {}) {
  return detail::mse_loss(input, target, options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor binary_cross_entropy(
    const Tensor& input,
    const Tensor& target,
    const Tensor& weight,
    BinaryCrossEntropyFuncOptions::reduction_t reduction) {
  auto reduction_enum = enumtype::reduction_get_enum(reduction);

  if (target.sizes() != input.sizes()) {
    TORCH_CHECK(
        false,
        "Using a target size (",
        target.sizes(),
        ") ",
        "that is different to the input size (",
        input.sizes(),
        ") is deprecated. ",
        "Please ensure they have the same size.");
  }

  auto weight_ = weight;
  if (weight_.defined()) {
    auto new_size = at::infer_size(target.sizes(), weight_.sizes());
    weight_ = weight_.expand(new_size);
  }

  return torch::binary_cross_entropy(input, target, weight_, reduction_enum);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::BinaryCrossEntropyFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::binary_cross_entropy(input, target,
/// F::BinaryCrossEntropyFuncOptions().weight(weight));
/// ```
inline Tensor binary_cross_entropy(
    const Tensor& input,
    const Tensor& target,
    const BinaryCrossEntropyFuncOptions& options = {}) {
  return detail::binary_cross_entropy(
      input, target, options.weight(), options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor hinge_embedding_loss(
    const Tensor& input,
    const Tensor& target,
    double margin,
    HingeEmbeddingLossFuncOptions::reduction_t reduction) {
  return torch::hinge_embedding_loss(
      input, target, margin, enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hinge_embedding_loss
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::HingeEmbeddingLossFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::hinge_embedding_loss(input, target,
/// F::HingeEmbeddingLossFuncOptions().margin(2));
/// ```
inline Tensor hinge_embedding_loss(
    const Tensor& input,
    const Tensor& target,
    const HingeEmbeddingLossFuncOptions& options = {}) {
  return detail::hinge_embedding_loss(
      input, target, options.margin(), options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor multi_margin_loss(
    const Tensor& input,
    const Tensor& target,
    int64_t p,
    double margin,
    const Tensor& weight,
    MultiMarginLossFuncOptions::reduction_t reduction) {
  TORCH_CHECK(p == 1 || p == 2, "only p == 1 and p == 2 supported");
  if (weight.defined()) {
    TORCH_CHECK(weight.dim() == 1, "weight must be one-dimensional");
  }

  return torch::multi_margin_loss(
      input,
      target,
      p,
      margin,
      weight,
      enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multi_margin_loss
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::MultiMarginLossFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::multi_margin_loss(input, target,
/// F::MultiMarginLossFuncOptions().margin(2).weight(weight));
/// ```
inline Tensor multi_margin_loss(
    const Tensor& input,
    const Tensor& target,
    const MultiMarginLossFuncOptions& options = {}) {
  return detail::multi_margin_loss(
      input,
      target,
      options.p(),
      options.margin(),
      options.weight(),
      options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor cosine_embedding_loss(
    const Tensor& input1,
    const Tensor& input2,
    const Tensor& target,
    double margin,
    CosineEmbeddingLossFuncOptions::reduction_t reduction) {
  return torch::cosine_embedding_loss(
      input1, input2, target, margin, enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_embedding_loss
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::CosineEmbeddingLossFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::cosine_embedding_loss(input1, input2, target,
/// F::CosineEmbeddingLossFuncOptions().margin(0.5));
/// ```
inline Tensor cosine_embedding_loss(
    const Tensor& input1,
    const Tensor& input2,
    const Tensor& target,
    const CosineEmbeddingLossFuncOptions& options = {}) {
  return detail::cosine_embedding_loss(
      input1, input2, target, options.margin(), options.reduction());
}

// ============================================================================

inline Tensor _smooth_l1_loss(
    const Tensor& input,
    const Tensor& target,
    double beta = 1.) {
  auto t = torch::abs(input - target);
  return torch::where(t < beta, 0.5 * torch::pow(t, 2) / beta, t - 0.5 * beta);
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor smooth_l1_loss(
    const Tensor& input,
    const Tensor& target,
    SmoothL1LossFuncOptions::reduction_t reduction,
    c10::optional<double> beta_opt = c10::nullopt) {
  if (target.sizes() != input.sizes()) {
    TORCH_WARN(
        "Using a target size (",
        target.sizes(),
        ") that is different to the input size (",
        input.sizes(),
        "). ",
        "This will likely lead to incorrect results due to broadcasting. ",
        "Please ensure they have the same size.");
  }
  double beta = beta_opt.value_or(1.0);

  std::vector<Tensor> expanded_tensors =
      torch::broadcast_tensors({input, target});
  return torch::smooth_l1_loss(
      expanded_tensors[0],
      expanded_tensors[1],
      enumtype::reduction_get_enum(reduction),
      beta);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::SmoothL1LossFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
/// ```
inline Tensor smooth_l1_loss(
    const Tensor& input,
    const Tensor& target,
    const SmoothL1LossFuncOptions& options = {}) {
  return detail::smooth_l1_loss(
      input, target, options.reduction(), options.beta());
}

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss
/// about the exact behavior of this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::smooth_l1_loss(input, target, /*options=*/torch::kNone, /*beta=*/0.5);
/// ```
inline Tensor smooth_l1_loss(
    const Tensor& input,
    const Tensor& target,
    const SmoothL1LossFuncOptions& options,
    double beta) {
  TORCH_CHECK(
      options.beta() == c10::nullopt,
      "expected beta not to be provided in 'options', but got ",
      options.beta().value());
  return detail::smooth_l1_loss(input, target, options.reduction(), beta);
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor huber_loss(
    const Tensor& input,
    const Tensor& target,
    HuberLossFuncOptions::reduction_t reduction,
    double delta = 1.) {
  if (target.sizes() != input.sizes()) {
    TORCH_WARN(
        "Using a target size (",
        target.sizes(),
        ") that is different to the input size (",
        input.sizes(),
        "). ",
        "This will likely lead to incorrect results due to broadcasting. ",
        "Please ensure they have the same size.");
  }

  std::vector<Tensor> expanded_tensors =
      torch::broadcast_tensors({input, target});
  return torch::huber_loss(
      expanded_tensors[0],
      expanded_tensors[1],
      enumtype::reduction_get_enum(reduction),
      delta);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.huber_loss
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::HuberLossFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::huber_loss(input, target,
/// F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5));
/// ```
inline Tensor huber_loss(
    const Tensor& input,
    const Tensor& target,
    const HuberLossFuncOptions& options = {}) {
  return detail::huber_loss(
      input, target, options.reduction(), options.delta());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor multilabel_margin_loss(
    const Tensor& input,
    const Tensor& target,
    MultilabelMarginLossFuncOptions::reduction_t reduction) {
  return torch::multilabel_margin_loss(
      input, target, enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_margin_loss
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::MultilabelMarginLossFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::multilabel_margin_loss(input, target,
/// F::MultilabelMarginLossFuncOptions(torch::kNone));
/// ```
inline Tensor multilabel_margin_loss(
    const Tensor& input,
    const Tensor& target,
    const MultilabelMarginLossFuncOptions& options = {}) {
  return detail::multilabel_margin_loss(input, target, options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor soft_margin_loss(
    const Tensor& input,
    const Tensor& target,
    SoftMarginLossFuncOptions::reduction_t reduction) {
  return torch::soft_margin_loss(
      input, target, enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.soft_margin_loss
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::SoftMarginLossFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::soft_margin_loss(input, target,
/// F::SoftMarginLossFuncOptions(torch::kNone));
/// ```
inline Tensor soft_margin_loss(
    const Tensor& input,
    const Tensor& target,
    const SoftMarginLossFuncOptions& options = {}) {
  return detail::soft_margin_loss(input, target, options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor multilabel_soft_margin_loss(
    const Tensor& input,
    const Tensor& target,
    const Tensor& weight,
    MultilabelSoftMarginLossFuncOptions::reduction_t reduction) {
  auto loss =
      -(target * torch::log_sigmoid(input) +
        (1 - target) * torch::log_sigmoid(-input));
  if (weight.defined()) {
    loss = loss * weight;
  }

  auto class_dim = input.dim() - 1;
  auto C = input.size(class_dim);
  loss = loss.sum(class_dim) / C; // only return N loss values

  Tensor ret;

  if (std::holds_alternative<enumtype::kNone>(reduction)) {
    ret = loss;
  } else if (std::holds_alternative<enumtype::kMean>(reduction)) {
    ret = loss.mean();
  } else if (std::holds_alternative<enumtype::kSum>(reduction)) {
    ret = loss.sum();
  } else {
    ret = input;
    TORCH_INTERNAL_ASSERT(
        false, enumtype::get_enum_name(reduction), " is not valid");
  }
  return ret;
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_soft_margin_loss
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::MultilabelSoftMarginLossFuncOptions` class to learn
/// what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::multilabel_soft_margin_loss(input, target,
/// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
/// ```
inline Tensor multilabel_soft_margin_loss(
    const Tensor& input,
    const Tensor& target,
    const MultilabelSoftMarginLossFuncOptions& options = {}) {
  return detail::multilabel_soft_margin_loss(
      input, target, options.weight(), options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor triplet_margin_loss(
    const Tensor& anchor,
    const Tensor& positive,
    const Tensor& negative,
    double margin,
    double p,
    double eps,
    bool swap,
    TripletMarginLossFuncOptions::reduction_t reduction) {
  return torch::triplet_margin_loss(
      anchor,
      positive,
      negative,
      margin,
      p,
      eps,
      swap,
      enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_loss
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::TripletMarginLossFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::triplet_margin_loss(anchor, positive, negative,
/// F::TripletMarginLossFuncOptions().margin(1.0));
/// ```
inline Tensor triplet_margin_loss(
    const Tensor& anchor,
    const Tensor& positive,
    const Tensor& negative,
    const TripletMarginLossFuncOptions& options = {}) {
  return detail::triplet_margin_loss(
      anchor,
      positive,
      negative,
      options.margin(),
      options.p(),
      options.eps(),
      options.swap(),
      options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor triplet_margin_with_distance_loss(
    const Tensor& anchor,
    const Tensor& positive,
    const Tensor& negative,
    c10::optional<TripletMarginWithDistanceLossFuncOptions::distance_function_t>
        distance_function,
    double margin,
    bool swap,
    TripletMarginWithDistanceLossFuncOptions::reduction_t reduction) {
  Tensor dist_pos, dist_neg;
  if (distance_function.has_value()) {
    auto distance_function_impl = distance_function.value();
    dist_pos = distance_function_impl(anchor, positive);
    dist_neg = distance_function_impl(anchor, negative);
  } else {
    dist_pos = pairwise_distance(anchor, positive);
    dist_neg = pairwise_distance(anchor, negative);
  }

  if (swap) {
    Tensor dist_swap;
    if (distance_function.has_value()) {
      dist_swap = distance_function.value()(positive, negative);
    } else {
      dist_swap = pairwise_distance(positive, negative);
    }
    dist_neg = torch::min(dist_neg, dist_swap);
  }

  auto loss = torch::clamp_min(dist_pos - dist_neg + margin, 0);

  Tensor ret;
  if (std::holds_alternative<enumtype::kNone>(reduction)) {
    ret = loss;
  } else if (std::holds_alternative<enumtype::kMean>(reduction)) {
    ret = loss.mean();
  } else if (std::holds_alternative<enumtype::kSum>(reduction)) {
    ret = loss.sum();
  } else {
    ret = anchor;
    TORCH_INTERNAL_ASSERT(
        false, enumtype::get_enum_name(reduction), " is not valid");
  }
  return ret;
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_with_distance_loss
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::TripletMarginWithDistanceLossFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::triplet_margin_with_distance_loss(anchor, positive, negative,
/// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
/// ```
inline Tensor triplet_margin_with_distance_loss(
    const Tensor& anchor,
    const Tensor& positive,
    const Tensor& negative,
    const TripletMarginWithDistanceLossFuncOptions& options = {}) {
  return detail::triplet_margin_with_distance_loss(
      anchor,
      positive,
      negative,
      options.distance_function(),
      options.margin(),
      options.swap(),
      options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor ctc_loss(
    const Tensor& log_probs,
    const Tensor& targets,
    const Tensor& input_lengths,
    const Tensor& target_lengths,
    int64_t blank,
    CTCLossFuncOptions::reduction_t reduction,
    bool zero_infinity) {
  return torch::ctc_loss(
      log_probs,
      targets,
      input_lengths,
      target_lengths,
      blank,
      enumtype::reduction_get_enum(reduction),
      zero_infinity);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.ctc_loss
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::CTCLossFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
/// F::CTCLossFuncOptions().reduction(torch::kNone));
/// ```
inline Tensor ctc_loss(
    const Tensor& log_probs,
    const Tensor& targets,
    const Tensor& input_lengths,
    const Tensor& target_lengths,
    const CTCLossFuncOptions& options = {}) {
  return detail::ctc_loss(
      log_probs,
      targets,
      input_lengths,
      target_lengths,
      options.blank(),
      options.reduction(),
      options.zero_infinity());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor poisson_nll_loss(
    const Tensor& input,
    const Tensor& target,
    bool log_input,
    bool full,
    double eps,
    PoissonNLLLossFuncOptions::reduction_t reduction) {
  return torch::poisson_nll_loss(
      input,
      target,
      log_input,
      full,
      eps,
      enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.poisson_nll_loss
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::PoissonNLLLossFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::poisson_nll_loss(input, target,
/// F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
/// ```
inline Tensor poisson_nll_loss(
    const Tensor& input,
    const Tensor& target,
    const PoissonNLLLossFuncOptions& options = {}) {
  return detail::poisson_nll_loss(
      input,
      target,
      options.log_input(),
      options.full(),
      options.eps(),
      options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor margin_ranking_loss(
    const Tensor& input1,
    const Tensor& input2,
    const Tensor& target,
    double margin,
    MarginRankingLossFuncOptions::reduction_t reduction) {
  TORCH_CHECK(
      input1.dim() == input2.dim() && input1.dim() == target.dim(),
      "margin_ranking_loss : All input tensors should have same dimension but got sizes: "
      "input1: ",
      input1.sizes(),
      ", input2: ",
      input2.sizes(),
      ", target: ",
      target.sizes());
  return torch::margin_ranking_loss(
      input1, input2, target, margin, enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.margin_ranking_loss
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::MarginRankingLossFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::margin_ranking_loss(input1, input2, target,
/// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
/// ```
inline Tensor margin_ranking_loss(
    const Tensor& input1,
    const Tensor& input2,
    const Tensor& target,
    const MarginRankingLossFuncOptions& options = {}) {
  return detail::margin_ranking_loss(
      input1, input2, target, options.margin(), options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor nll_loss(
    const Tensor& input,
    const Tensor& target,
    const Tensor& weight,
    int64_t ignore_index,
    const NLLLossFuncOptions::reduction_t reduction) {
  if (input.dim() < 2) {
    TORCH_CHECK(false, "Expected 2 or more dimensions (got ", input.dim(), ")");
  }

  if (input.sizes()[0] != target.sizes()[0]) {
    TORCH_CHECK(
        false,
        "Expected input batch_size (",
        input.sizes()[0],
        ") to match target batch_size (",
        target.sizes()[0],
        ").");
  }

  return torch::nll_loss_nd(
      input,
      target,
      weight,
      enumtype::reduction_get_enum(reduction),
      ignore_index);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.nll_loss
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::NLLLossFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::nll_loss(input, target,
/// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
/// ```
inline Tensor nll_loss(
    const Tensor& input,
    const Tensor& target,
    const NLLLossFuncOptions& options = {}) {
  return detail::nll_loss(
      input,
      target,
      options.weight(),
      options.ignore_index(),
      options.reduction());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor cross_entropy(
    const Tensor& input,
    const Tensor& target,
    const Tensor& weight,
    int64_t ignore_index,
    CrossEntropyFuncOptions::reduction_t reduction,
    double label_smoothing) {
  return torch::cross_entropy_loss(
      input,
      target,
      weight,
      enumtype::reduction_get_enum(reduction),
      ignore_index,
      label_smoothing);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cross_entropy
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::CrossEntropyFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::cross_entropy(input, target,
/// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
/// ```
inline Tensor cross_entropy(
    const Tensor& input,
    const Tensor& target,
    const CrossEntropyFuncOptions& options = {}) {
  return detail::cross_entropy(
      input,
      target,
      options.weight(),
      options.ignore_index(),
      options.reduction(),
      options.label_smoothing());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor binary_cross_entropy_with_logits(
    const Tensor& input,
    const Tensor& target,
    const Tensor& weight,
    BinaryCrossEntropyWithLogitsFuncOptions::reduction_t reduction,
    const Tensor& pos_weight) {
  TORCH_CHECK(
      target.sizes() == input.sizes(),
      "Target size (",
      target.sizes(),
      ") must be the same as input size (",
      input.sizes(),
      ")");

  return torch::binary_cross_entropy_with_logits(
      input,
      target,
      weight,
      pos_weight,
      enumtype::reduction_get_enum(reduction));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy_with_logits
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions` class to
/// learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::binary_cross_entropy_with_logits(input, target,
/// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
/// ```
inline Tensor binary_cross_entropy_with_logits(
    const Tensor& input,
    const Tensor& target,
    const BinaryCrossEntropyWithLogitsFuncOptions& options = {}) {
  return detail::binary_cross_entropy_with_logits(
      input,
      target,
      options.weight(),
      options.reduction(),
      options.pos_weight());
}

} // namespace functional
} // namespace nn
} // namespace torch
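A minimal usage sketch touching two of the loss functionals above (not part of the uploaded file); batch sizes, class count, and reductions are illustrative assumptions.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  // Hypothetical regression pair for MSE.
  auto pred = torch::randn({8, 3});
  auto truth = torch::randn({8, 3});
  auto mse = F::mse_loss(pred, truth, F::MSELossFuncOptions(torch::kSum));

  // Hypothetical 4-way classification batch for cross entropy.
  auto logits = torch::randn({8, 4});
  auto labels = torch::randint(0, 4, {8}, torch::kLong);
  auto ce = F::cross_entropy(
      logits, labels, F::CrossEntropyFuncOptions().reduction(torch::kMean));

  std::cout << mse.item<float>() << " " << ce.item<float>() << std::endl;
}
```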
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h
ADDED
@@ -0,0 +1,211 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/nn/functional/padding.h>
|
4 |
+
#include <torch/nn/functional/pooling.h>
|
5 |
+
#include <torch/nn/options/normalization.h>
|
6 |
+
#include <torch/types.h>
|
7 |
+
|
8 |
+
namespace torch {
|
9 |
+
namespace nn {
|
10 |
+
namespace functional {
|
11 |
+
|
12 |
+
#ifndef DOXYGEN_SHOULD_SKIP_THIS
|
13 |
+
namespace detail {
|
14 |
+
inline Tensor normalize(
|
15 |
+
const Tensor& input,
|
16 |
+
double p,
|
17 |
+
int64_t dim,
|
18 |
+
double eps,
|
19 |
+
c10::optional<Tensor> out) {
|
20 |
+
if (out == c10::nullopt) {
|
21 |
+
auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input);
|
22 |
+
return input / denom;
|
23 |
+
} else {
|
24 |
+
auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input);
|
25 |
+
return torch::div_out(*out, input, denom);
|
26 |
+
}
|
27 |
+
}
|
28 |
+
} // namespace detail
|
29 |
+
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
|
30 |
+
|
31 |
+
/// See
|
32 |
+
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.normalize
|
33 |
+
/// about the exact behavior of this functional.
|
34 |
+
///
|
35 |
+
/// See the documentation for `torch::nn::functional::NormalizeFuncOptions`
|
36 |
+
/// class to learn what optional arguments are supported for this functional.
|
37 |
+
///
|
38 |
+
/// Example:
|
39 |
+
/// ```
|
40 |
+
/// namespace F = torch::nn::functional;
|
41 |
+
/// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1));
|
42 |
+
/// ```
|
43 |
+
inline Tensor normalize(
|
44 |
+
const Tensor& input,
|
45 |
+
NormalizeFuncOptions options = {}) {
|
46 |
+
return detail::normalize(
|
47 |
+
input, options.p(), options.dim(), options.eps(), options.out());
|
48 |
+
}
|
49 |
+
|
50 |
+
// ============================================================================
|
51 |
+
|
52 |
+
#ifndef DOXYGEN_SHOULD_SKIP_THIS
|
53 |
+
namespace detail {
|
54 |
+
inline Tensor layer_norm(
|
55 |
+
const Tensor& input,
|
56 |
+
const std::vector<int64_t>& normalized_shape,
|
57 |
+
const Tensor& weight,
|
58 |
+
const Tensor& bias,
|
59 |
+
double eps) {
|
60 |
+
return torch::layer_norm(input, normalized_shape, weight, bias, eps);
|
61 |
+
}
|
62 |
+
} // namespace detail
|
63 |
+
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
|
64 |
+
|
65 |
+
/// See
|
66 |
+
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.layer_norm
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::LayerNormFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5));
/// ```
inline Tensor layer_norm(
    const Tensor& input,
    const LayerNormFuncOptions& options) {
  return detail::layer_norm(
      input,
      options.normalized_shape(),
      options.weight(),
      options.bias(),
      options.eps());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor local_response_norm(
    const Tensor& input,
    int64_t size,
    double alpha,
    double beta,
    double k) {
  auto dim = input.dim();
  TORCH_CHECK(
      dim >= 3,
      "Expected 3D or higher dimensionality input (got ",
      dim,
      " dimensions)");
  auto div = input.mul(input).unsqueeze(1);
  if (dim == 3) {
    div = detail::pad(
        div,
        /*pad=*/{0, 0, size / 2, (size - 1) / 2},
        /*mode=*/torch::kConstant,
        /*value=*/0);
    div = detail::avg_pool2d(
              div,
              /*kernel_size=*/{size, 1},
              /*stride=*/1,
              /*padding=*/0,
              /*ceil_mode=*/false,
              /*count_include_pad=*/true,
              /*divisor_override=*/c10::nullopt)
              .squeeze(1);
  } else {
    auto sizes = input.sizes();
    div = div.view({sizes[0], 1, sizes[1], sizes[2], -1});
    div = detail::pad(
        div,
        /*pad=*/{0, 0, 0, 0, size / 2, (size - 1) / 2},
        /*mode=*/torch::kConstant,
        /*value=*/0);
    div = detail::avg_pool3d(
              div,
              /*kernel_size=*/{size, 1, 1},
              /*stride=*/1,
              /*padding=*/0,
              /*ceil_mode=*/false,
              /*count_include_pad=*/true,
              /*divisor_override=*/c10::nullopt)
              .squeeze(1);
    div = div.view(sizes);
  }
  div = div.mul(alpha).add(k).pow(beta);
  return input / div;
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.local_response_norm
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::LocalResponseNormFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
/// ```
inline Tensor local_response_norm(
    const Tensor& input,
    const LocalResponseNormFuncOptions& options) {
  return detail::local_response_norm(
      input, options.size(), options.alpha(), options.beta(), options.k());
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor group_norm(
    const Tensor& input,
    int64_t num_groups,
    const Tensor& weight,
    const Tensor& bias,
    double eps) {
  return torch::group_norm(
      input,
      num_groups,
      weight,
      bias,
      eps,
      at::globalContext().userEnabledCuDNN());
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.group_norm
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::GroupNormFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5));
/// ```
inline Tensor group_norm(
    const Tensor& input,
    const GroupNormFuncOptions& options) {
  return detail::group_norm(
      input,
      options.num_groups(),
      options.weight(),
      options.bias(),
      options.eps());
}

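// Usage sketch for the normalization functionals above; the shapes and option
// values are illustrative assumptions, not part of the header:
// ```
// namespace F = torch::nn::functional;
// auto x = torch::randn({4, 6, 8, 8});
// auto ln = F::layer_norm(x, F::LayerNormFuncOptions({6, 8, 8}).eps(1e-5));
// auto gn = F::group_norm(x, F::GroupNormFuncOptions(3).eps(1e-5));
// auto lrn = F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
// ```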
} // namespace functional
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h
ADDED
@@ -0,0 +1,47 @@
#pragma once

#include <torch/nn/options/pixelshuffle.h>

namespace torch {
namespace nn {
namespace functional {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor pixel_shuffle(const Tensor& input, int64_t upscale_factor) {
  return torch::pixel_shuffle(input, upscale_factor);
}

inline Tensor pixel_unshuffle(const Tensor& input, int64_t downscale_factor) {
  return torch::pixel_unshuffle(input, downscale_factor);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pixel_shuffle
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::PixelShuffleFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
/// ```
inline Tensor pixel_shuffle(
    const Tensor& input,
    const PixelShuffleFuncOptions& options) {
  return detail::pixel_shuffle(input, options.upscale_factor());
}

inline Tensor pixel_unshuffle(
    const Tensor& input,
    const PixelUnshuffleFuncOptions& options) {
  return detail::pixel_unshuffle(input, options.downscale_factor());
}

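// Usage sketch; shapes are illustrative assumptions. pixel_shuffle moves an
// r^2 factor from the channel dimension into the spatial dimensions (here
// r = 2), and pixel_unshuffle is its inverse:
// ```
// namespace F = torch::nn::functional;
// auto x = torch::randn({1, 8, 4, 4});
// auto up = F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
// // up.sizes() == [1, 2, 8, 8]
// auto back = F::pixel_unshuffle(up, F::PixelUnshuffleFuncOptions(2));
// // back.sizes() == [1, 8, 4, 4]
// ```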
} // namespace functional
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h
ADDED
@@ -0,0 +1,1098 @@
#pragma once

#include <c10/util/irange.h>
#include <torch/nn/functional/activation.h>
#include <torch/nn/modules/utils.h>
#include <torch/nn/options/pooling.h>

namespace torch {
namespace nn {
namespace functional {

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor avg_pool1d(
    const Tensor& input,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    ExpandingArray<1> padding,
    bool ceil_mode,
    bool count_include_pad) {
  return torch::avg_pool1d(
      input, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool1d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::AvgPool1dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
/// ```
inline Tensor avg_pool1d(
    const Tensor& input,
    const AvgPool1dFuncOptions& options) {
  return avg_pool1d(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.ceil_mode(),
      options.count_include_pad());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor avg_pool2d(
    const Tensor& input,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    ExpandingArray<2> padding,
    bool ceil_mode,
    bool count_include_pad,
    c10::optional<int64_t> divisor_override) {
  return torch::avg_pool2d(
      input,
      kernel_size,
      stride,
      padding,
      ceil_mode,
      count_include_pad,
      divisor_override);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool2d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::AvgPool2dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
/// ```
inline Tensor avg_pool2d(
    const Tensor& input,
    const AvgPool2dFuncOptions& options) {
  return detail::avg_pool2d(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.ceil_mode(),
      options.count_include_pad(),
      options.divisor_override());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor avg_pool3d(
    const Tensor& input,
    ExpandingArray<3> kernel_size,
    ExpandingArray<3> stride,
    ExpandingArray<3> padding,
    bool ceil_mode,
    bool count_include_pad,
    c10::optional<int64_t> divisor_override) {
  return torch::avg_pool3d(
      input,
      kernel_size,
      stride,
      padding,
      ceil_mode,
      count_include_pad,
      divisor_override);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool3d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::AvgPool3dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
/// ```
inline Tensor avg_pool3d(
    const Tensor& input,
    const AvgPool3dFuncOptions& options) {
  return detail::avg_pool3d(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.ceil_mode(),
      options.count_include_pad(),
      options.divisor_override());
}

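// Usage sketch for the average-pooling wrappers above; the input shape and
// option values are illustrative assumptions, not part of the header:
// ```
// namespace F = torch::nn::functional;
// auto x = torch::randn({1, 3, 8, 8});
// // 3x3 window, stride 2: output spatial size is (8 - 3) / 2 + 1 = 3.
// auto y = F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
// // y.sizes() == [1, 3, 3, 3]
// ```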
// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor max_pool1d(
    const Tensor& input,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    ExpandingArray<1> padding,
    ExpandingArray<1> dilation,
    bool ceil_mode) {
  return torch::max_pool1d(
      input, kernel_size, stride, padding, dilation, ceil_mode);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool1d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
/// ```
inline Tensor max_pool1d(
    const Tensor& input,
    const MaxPool1dFuncOptions& options) {
  return detail::max_pool1d(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.dilation(),
      options.ceil_mode());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> max_pool1d_with_indices(
    const Tensor& input,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    ExpandingArray<1> padding,
    ExpandingArray<1> dilation,
    bool ceil_mode) {
  return torch::max_pool1d_with_indices(
      input, kernel_size, stride, padding, dilation, ceil_mode);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool1d_with_indices(x, F::MaxPool1dFuncOptions(3).stride(2));
/// ```
inline std::tuple<Tensor, Tensor> max_pool1d_with_indices(
    const Tensor& input,
    const MaxPool1dFuncOptions& options) {
  return detail::max_pool1d_with_indices(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.dilation(),
      options.ceil_mode());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor max_pool2d(
    const Tensor& input,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    ExpandingArray<2> padding,
    ExpandingArray<2> dilation,
    bool ceil_mode) {
  return torch::max_pool2d(
      input, kernel_size, stride, padding, dilation, ceil_mode);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool2d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
/// ```
inline Tensor max_pool2d(
    const Tensor& input,
    const MaxPool2dFuncOptions& options) {
  return detail::max_pool2d(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.dilation(),
      options.ceil_mode());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> max_pool2d_with_indices(
    const Tensor& input,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    ExpandingArray<2> padding,
    ExpandingArray<2> dilation,
    bool ceil_mode) {
  return torch::max_pool2d_with_indices(
      input, kernel_size, stride, padding, dilation, ceil_mode);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(3).stride(2));
/// ```
inline std::tuple<Tensor, Tensor> max_pool2d_with_indices(
    const Tensor& input,
    const MaxPool2dFuncOptions& options) {
  return detail::max_pool2d_with_indices(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.dilation(),
      options.ceil_mode());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor max_pool3d(
    const Tensor& input,
    ExpandingArray<3> kernel_size,
    ExpandingArray<3> stride,
    ExpandingArray<3> padding,
    ExpandingArray<3> dilation,
    bool ceil_mode) {
  return torch::max_pool3d(
      input, kernel_size, stride, padding, dilation, ceil_mode);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool3d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
/// ```
inline Tensor max_pool3d(
    const Tensor& input,
    const MaxPool3dFuncOptions& options) {
  return detail::max_pool3d(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.dilation(),
      options.ceil_mode());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> max_pool3d_with_indices(
    const Tensor& input,
    ExpandingArray<3> kernel_size,
    ExpandingArray<3> stride,
    ExpandingArray<3> padding,
    ExpandingArray<3> dilation,
    bool ceil_mode) {
  return torch::max_pool3d_with_indices(
      input, kernel_size, stride, padding, dilation, ceil_mode);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool3d_with_indices(x, F::MaxPool3dFuncOptions(3).stride(2));
/// ```
inline std::tuple<Tensor, Tensor> max_pool3d_with_indices(
    const Tensor& input,
    const MaxPool3dFuncOptions& options) {
  return detail::max_pool3d_with_indices(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.dilation(),
      options.ceil_mode());
}

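// Usage sketch for the max-pooling wrappers above; shapes are illustrative
// assumptions. The second tuple element holds the argmax indices that
// max_unpool2d (further below) consumes:
// ```
// namespace F = torch::nn::functional;
// auto x = torch::randn({1, 1, 4, 4});
// auto [pooled, indices] =
//     F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(2).stride(2));
// // pooled.sizes() == [1, 1, 2, 2]; indices stores flat positions of maxima.
// ```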
// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> adaptive_max_pool1d_with_indices(
    const Tensor& input,
    ExpandingArray<1> output_size) {
  return torch::adaptive_max_pool1d(input, output_size);
}
} // namespace detail

/// See the documentation for
/// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool1d_with_indices(x, F::AdaptiveMaxPool1dFuncOptions(3));
/// ```
inline std::tuple<Tensor, Tensor> adaptive_max_pool1d_with_indices(
    const Tensor& input,
    const AdaptiveMaxPool1dFuncOptions& options) {
  return detail::adaptive_max_pool1d_with_indices(input, options.output_size());
}

namespace detail {
inline Tensor adaptive_max_pool1d(
    const Tensor& input,
    ExpandingArray<1> output_size) {
  return std::get<0>(adaptive_max_pool1d_with_indices(input, output_size));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool1d
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
/// ```
inline Tensor adaptive_max_pool1d(
    const Tensor& input,
    const AdaptiveMaxPool1dFuncOptions& options) {
  return detail::adaptive_max_pool1d(input, options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> adaptive_max_pool2d_with_indices(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<2> output_size) {
  auto output_size_ =
      torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
  return torch::adaptive_max_pool2d(input, output_size_);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for
/// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool2d_with_indices(x, F::AdaptiveMaxPool2dFuncOptions(3));
/// ```
inline std::tuple<Tensor, Tensor> adaptive_max_pool2d_with_indices(
    const Tensor& input,
    const AdaptiveMaxPool2dFuncOptions& options) {
  return detail::adaptive_max_pool2d_with_indices(input, options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor adaptive_max_pool2d(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<2> output_size) {
  return std::get<0>(adaptive_max_pool2d_with_indices(input, output_size));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool2d
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
/// ```
inline Tensor adaptive_max_pool2d(
    const Tensor& input,
    const AdaptiveMaxPool2dFuncOptions& options) {
  return detail::adaptive_max_pool2d(input, options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> adaptive_max_pool3d_with_indices(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<3> output_size) {
  auto output_size_ =
      torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
  return torch::adaptive_max_pool3d(input, output_size_);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for
/// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool3d_with_indices(x, F::AdaptiveMaxPool3dFuncOptions(3));
/// ```
inline std::tuple<Tensor, Tensor> adaptive_max_pool3d_with_indices(
    const Tensor& input,
    const AdaptiveMaxPool3dFuncOptions& options) {
  return detail::adaptive_max_pool3d_with_indices(input, options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor adaptive_max_pool3d(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<3> output_size) {
  return std::get<0>(adaptive_max_pool3d_with_indices(input, output_size));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool3d
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
/// ```
inline Tensor adaptive_max_pool3d(
    const Tensor& input,
    const AdaptiveMaxPool3dFuncOptions& options) {
  return detail::adaptive_max_pool3d(input, options.output_size());
}

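// Usage sketch for the adaptive max-pooling wrappers above; shapes are
// illustrative assumptions. Adaptive variants take a target output size
// instead of a kernel size and stride:
// ```
// namespace F = torch::nn::functional;
// auto x = torch::randn({1, 3, 17, 29});
// auto y = F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(5));
// // y.sizes() == [1, 3, 5, 5] regardless of the input's spatial size.
// ```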
// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor adaptive_avg_pool1d(
    const Tensor& input,
    ExpandingArray<1> output_size) {
  return torch::adaptive_avg_pool1d(input, output_size);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool1d
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::AdaptiveAvgPool1dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
/// ```
inline Tensor adaptive_avg_pool1d(
    const Tensor& input,
    const AdaptiveAvgPool1dFuncOptions& options) {
  return detail::adaptive_avg_pool1d(input, options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor adaptive_avg_pool2d(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<2> output_size) {
  auto output_size_ =
      torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
  return torch::adaptive_avg_pool2d(input, output_size_);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool2d
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::AdaptiveAvgPool2dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
/// ```
inline Tensor adaptive_avg_pool2d(
    const Tensor& input,
    const AdaptiveAvgPool2dFuncOptions& options) {
  return detail::adaptive_avg_pool2d(input, options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor adaptive_avg_pool3d(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<3> output_size) {
  auto output_size_ =
      torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
  return torch::adaptive_avg_pool3d(input, output_size_);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool3d
/// about the exact behavior of this functional.
///
/// See the documentation for
/// `torch::nn::functional::AdaptiveAvgPool3dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
/// ```
inline Tensor adaptive_avg_pool3d(
    const Tensor& input,
    const AdaptiveAvgPool3dFuncOptions& options) {
  return detail::adaptive_avg_pool3d(input, options.output_size());
}

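// Usage sketch; shapes are illustrative assumptions. An output size of 1
// turns adaptive average pooling into global average pooling, a common way
// to collapse the spatial dimensions before a classifier head:
// ```
// namespace F = torch::nn::functional;
// auto feat = torch::randn({8, 512, 7, 7});
// auto pooled = F::adaptive_avg_pool2d(feat, F::AdaptiveAvgPool2dFuncOptions(1));
// auto flat = pooled.view({8, 512}); // ready for a Linear layer
// ```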
// ============================================================================

inline std::vector<int64_t> _unpool_output_size(
    const Tensor& input,
    const IntArrayRef& kernel_size,
    const IntArrayRef& stride,
    const IntArrayRef& padding,
    const c10::optional<std::vector<int64_t>>& output_size) {
  auto input_size = input.sizes();
  std::vector<int64_t> default_size;
  for (const auto d : c10::irange(kernel_size.size())) {
    default_size.push_back(
        (input_size[input_size.size() - kernel_size.size() + d] - 1) *
            stride[d] +
        kernel_size[d] - 2 * padding[d]);
  }
  if (!output_size) {
    return default_size;
  } else {
    std::vector<int64_t> output_size_;
    if (output_size->size() == kernel_size.size() + 2) {
      output_size_ = IntArrayRef(*output_size).slice(2).vec();
    }
    if (output_size_.size() != kernel_size.size()) {
      TORCH_CHECK(
          false,
          "output_size should be a sequence containing ",
          kernel_size.size(),
          " or ",
          kernel_size.size() + 2,
          " elements, but it has a length of '",
          output_size_.size(),
          "'");
    }
    for (const auto d : c10::irange(kernel_size.size())) {
      const auto min_size = default_size[d] - stride[d];
      const auto max_size = default_size[d] + stride[d];
      if (!(min_size <= output_size_[d] && output_size_[d] <= max_size)) {
        TORCH_CHECK(
            false,
            "invalid output_size ",
            output_size_,
            " (dim ",
            d,
            " must be between ",
            min_size,
            " and ",
            max_size,
            ")");
      }
    }
    return output_size_;
  }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor max_unpool1d(
    const Tensor& input,
    const Tensor& indices,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    ExpandingArray<1> padding,
    const c10::optional<std::vector<int64_t>>& output_size) {
  auto output_size_ =
      _unpool_output_size(input, kernel_size, stride, padding, output_size);
  output_size_.push_back(1);
  return torch::max_unpool2d(
             input.unsqueeze(-1), indices.unsqueeze(-1), output_size_)
      .squeeze(-1);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool1d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::MaxUnpool1dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_unpool1d(x, indices,
/// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
/// ```
inline Tensor max_unpool1d(
    const Tensor& input,
    const Tensor& indices,
    const MaxUnpool1dFuncOptions& options) {
  return detail::max_unpool1d(
      input,
      indices,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor max_unpool2d(
    const Tensor& input,
    const Tensor& indices,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    ExpandingArray<2> padding,
    const c10::optional<std::vector<int64_t>>& output_size) {
  auto output_size_ =
      _unpool_output_size(input, kernel_size, stride, padding, output_size);

  return torch::max_unpool2d(input, indices, output_size_);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool2d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::MaxUnpool2dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_unpool2d(x, indices,
/// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
/// ```
inline Tensor max_unpool2d(
    const Tensor& input,
    const Tensor& indices,
    const MaxUnpool2dFuncOptions& options) {
  return detail::max_unpool2d(
      input,
      indices,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.output_size());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor max_unpool3d(
    const Tensor& input,
    const Tensor& indices,
    ExpandingArray<3> kernel_size,
    ExpandingArray<3> stride,
    ExpandingArray<3> padding,
    const c10::optional<std::vector<int64_t>>& output_size) {
  auto output_size_ =
      _unpool_output_size(input, kernel_size, stride, padding, output_size);

  return torch::max_unpool3d(input, indices, output_size_, stride, padding);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool3d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::MaxUnpool3dFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
/// ```
inline Tensor max_unpool3d(
    const Tensor& input,
    const Tensor& indices,
    const MaxUnpool3dFuncOptions& options) {
  return detail::max_unpool3d(
      input,
      indices,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.output_size());
}

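// Usage sketch; shapes are illustrative assumptions. max_unpool2d inverts a
// max_pool2d call by scattering the pooled values back to the positions
// recorded in `indices`. Per _unpool_output_size above, the default output
// extent per dimension is (input_size - 1) * stride + kernel_size - 2 * padding,
// e.g. (2 - 1) * 2 + 2 - 0 = 4 for the call below:
// ```
// namespace F = torch::nn::functional;
// auto x = torch::randn({1, 1, 4, 4});
// auto [pooled, indices] =
//     F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(2).stride(2));
// auto restored =
//     F::max_unpool2d(pooled, indices, F::MaxUnpool2dFuncOptions(2).stride(2));
// // restored.sizes() == [1, 1, 4, 4]; non-argmax positions are zero.
// ```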
// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
    const Tensor& input,
    const ExpandingArray<2>& kernel_size,
    const c10::optional<ExpandingArray<2>>& output_size,
    const c10::optional<ExpandingArray<2, double>>& output_ratio,
    const Tensor& _random_samples) {
  if (output_size == c10::nullopt && output_ratio == c10::nullopt) {
    TORCH_CHECK(
        false,
        "fractional_max_pool2d requires specifying either ",
        "an output_size or an output_ratio");
  }
  c10::optional<ExpandingArray<2>> output_size_ = output_size;
  if (output_size_ == c10::nullopt) {
    TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt);
    output_size_ = {
        (int64_t)(static_cast<double>(input.size(-2)) * (*output_ratio.value())[0]),
        (int64_t)(static_cast<double>(input.size(-1)) * (*output_ratio.value())[1])};
  }

  Tensor _random_samples_ = _random_samples;
  if (!_random_samples_.defined()) {
    auto n_batch = input.dim() == 3 ? 1 : input.size(0);
    _random_samples_ = torch::rand(
        {n_batch, input.size(-3), 2},
        torch::TensorOptions().dtype(input.dtype()).device(input.device()));
  }
  return torch::fractional_max_pool2d(
      input, kernel_size, *output_size_, _random_samples_);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for
/// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fractional_max_pool2d_with_indices(x,
/// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
/// ```
inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
    const Tensor& input,
    const FractionalMaxPool2dFuncOptions& options) {
  return detail::fractional_max_pool2d_with_indices(
      input,
      options.kernel_size(),
      options.output_size(),
      options.output_ratio(),
      options._random_samples());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor fractional_max_pool2d(
    const Tensor& input,
    ExpandingArray<2> kernel_size,
    c10::optional<ExpandingArray<2>> output_size,
    c10::optional<ExpandingArray<2, double>> output_ratio,
    const Tensor& _random_samples) {
  return std::get<0>(fractional_max_pool2d_with_indices(
      input, kernel_size, output_size, output_ratio, _random_samples));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for
/// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fractional_max_pool2d(x,
/// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
/// ```
inline Tensor fractional_max_pool2d(
    const Tensor& input,
    const FractionalMaxPool2dFuncOptions& options) {
  return detail::fractional_max_pool2d(
      input,
      options.kernel_size(),
      options.output_size(),
      options.output_ratio(),
      options._random_samples());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices(
    const Tensor& input,
    const ExpandingArray<3>& kernel_size,
    const c10::optional<ExpandingArray<3>>& output_size,
    const c10::optional<ExpandingArray<3, double>>& output_ratio,
    const Tensor& _random_samples) {
  if (output_size == c10::nullopt && output_ratio == c10::nullopt) {
    TORCH_CHECK(
        false,
        "fractional_max_pool3d requires specifying either ",
        "an output_size or an output_ratio");
  }

  c10::optional<ExpandingArray<3>> output_size_ = output_size;
  if (output_size_ == c10::nullopt) {
    TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt);
    output_size_ = {
        (int64_t)(static_cast<double>(input.size(-3)) * (*output_ratio.value())[0]),
        (int64_t)(static_cast<double>(input.size(-2)) * (*output_ratio.value())[1]),
        (int64_t)(static_cast<double>(input.size(-1)) * (*output_ratio.value())[2])};
  }

  Tensor _random_samples_ = _random_samples;
  if (!_random_samples_.defined()) {
    auto n_batch = input.dim() == 4 ? 1 : input.size(0);
    _random_samples_ = torch::rand(
        {n_batch, input.size(-4), 3},
        torch::TensorOptions().dtype(input.dtype()).device(input.device()));
  }
  return torch::fractional_max_pool3d(
      input, kernel_size, *output_size_, _random_samples_);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for
/// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fractional_max_pool3d_with_indices(x,
/// F::FractionalMaxPool3dFuncOptions(3).output_size(2));
/// ```
inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices(
    const Tensor& input,
    const FractionalMaxPool3dFuncOptions& options) {
  return detail::fractional_max_pool3d_with_indices(
      input,
      options.kernel_size(),
      options.output_size(),
      options.output_ratio(),
      options._random_samples());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor fractional_max_pool3d(
    const Tensor& input,
    ExpandingArray<3> kernel_size,
    c10::optional<ExpandingArray<3>> output_size,
    c10::optional<ExpandingArray<3, double>> output_ratio,
    const Tensor& _random_samples) {
  return std::get<0>(fractional_max_pool3d_with_indices(
      input, kernel_size, output_size, output_ratio, _random_samples));
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See the documentation for
/// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what
/// optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fractional_max_pool3d(x,
/// F::FractionalMaxPool3dFuncOptions(3).output_size(2));
/// ```
inline Tensor fractional_max_pool3d(
    const Tensor& input,
    const FractionalMaxPool3dFuncOptions& options) {
  return detail::fractional_max_pool3d(
      input,
      options.kernel_size(),
      options.output_size(),
      options.output_ratio(),
      options._random_samples());
}

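// Usage sketch; the shape and ratio value are illustrative assumptions.
// Fractional max pooling needs either an explicit output_size or an
// output_ratio; with a ratio of 0.5 the call below maps a 16x16 input to
// roughly an 8x8 output using randomly placed pooling regions:
// ```
// namespace F = torch::nn::functional;
// auto x = torch::randn({1, 3, 16, 16});
// auto y = F::fractional_max_pool2d(
//     x, F::FractionalMaxPool2dFuncOptions(3).output_ratio(0.5));
// ```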
// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor lp_pool1d(
    const Tensor& input,
    double norm_type,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    bool ceil_mode) {
  Tensor out = detail::avg_pool1d(
      input.pow(norm_type),
      kernel_size,
      stride,
      /*padding=*/0,
      ceil_mode,
      /*count_include_pad=*/true);

  return (torch::sign(out) * relu(torch::abs(out)))
      .mul((*kernel_size)[0])
      .pow(1. / norm_type);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool1d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::LPPool1dFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
/// ```
inline Tensor lp_pool1d(
    const Tensor& input,
    const LPPool1dFuncOptions& options) {
  return detail::lp_pool1d(
      input,
      options.norm_type(),
      options.kernel_size(),
      options.stride(),
      options.ceil_mode());
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor lp_pool2d(
    const Tensor& input,
    double norm_type,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    bool ceil_mode) {
  int kw = (*kernel_size)[0];
  int kh = (*kernel_size)[1];
  Tensor out = detail::avg_pool2d(
      input.pow(norm_type),
      kernel_size,
      stride,
      /*padding=*/0,
      ceil_mode,
      /*count_include_pad=*/true,
      /*divisor_override=*/c10::nullopt);

  return (torch::sign(out) * relu(torch::abs(out)))
      .mul(kw * kh)
      .pow(1. / norm_type);
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool2d
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::LPPool2dFuncOptions` class
/// to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
/// ```
inline Tensor lp_pool2d(
    const Tensor& input,
    const LPPool2dFuncOptions& options) {
  return detail::lp_pool2d(
      input,
      options.norm_type(),
      options.kernel_size(),
      options.stride(),
      options.ceil_mode());
}

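// Usage sketch; the shape is an illustrative assumption. LP pooling with
// norm_type = 2 computes the 2-norm of each window (implemented above as
// avg_pool on input^p, rescaled by the window size, then raised to 1/p):
// ```
// namespace F = torch::nn::functional;
// auto x = torch::randn({1, 4, 10});
// auto y = F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
// ```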
} // namespace functional
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h
ADDED
@@ -0,0 +1,124 @@
#pragma once

#include <torch/nn/options/vision.h>
#include <torch/types.h>

namespace torch {
namespace nn {
namespace functional {

inline Tensor affine_grid(
    const Tensor& theta,
    const IntArrayRef& size,
    bool align_corners = false) {
  // enforce floating point dtype on theta
  TORCH_CHECK(
      theta.is_floating_point(),
      "Expected theta to have floating point type, but got ",
      theta.dtype());

  // check that shapes and sizes match
  if (size.size() == 4) {
    TORCH_CHECK(
        theta.dim() == 3 && theta.size(-2) == 2 && theta.size(-1) == 3,
        "Expected a batch of 2D affine matrices of shape Nx2x3 for size ",
        size,
        ". Got ",
        theta.sizes(),
        ".");
  } else if (size.size() == 5) {
    TORCH_CHECK(
        theta.dim() == 3 && theta.size(-2) == 3 && theta.size(-1) == 4,
        "Expected a batch of 3D affine matrices of shape Nx3x4 for size ",
        size,
        ". Got ",
        theta.sizes(),
        ".");
  } else {
    TORCH_CHECK(
        false,
        "affine_grid only supports 4D and 5D sizes, ",
        "for 2D and 3D affine transforms, respectively. ",
        "Got size ",
        size);
  }

  if (*std::min_element(size.begin(), size.end()) <= 0) {
    TORCH_CHECK(false, "Expected non-zero, positive output size. Got ", size);
  }

  return torch::affine_grid_generator(theta, size, align_corners);
}

// ============================================================================

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail {
inline Tensor grid_sample(
    const Tensor& input,
    const Tensor& grid,
    GridSampleFuncOptions::mode_t mode,
    GridSampleFuncOptions::padding_mode_t padding_mode,
    c10::optional<bool> align_corners) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  int64_t mode_enum, padding_mode_enum;

  if (std::holds_alternative<enumtype::kBilinear>(mode)) {
    mode_enum = 0;
  } else if (std::holds_alternative<enumtype::kNearest>(mode)) {
    mode_enum = 1;
  } else { /// mode == 'bicubic'
    mode_enum = 2;
  }

  if (std::holds_alternative<enumtype::kZeros>(padding_mode)) {
    padding_mode_enum = 0;
  } else if (std::holds_alternative<enumtype::kBorder>(padding_mode)) {
    padding_mode_enum = 1;
  } else { /// padding_mode == 'reflection'
    padding_mode_enum = 2;
  }

  if (!align_corners.has_value()) {
    TORCH_WARN(
        "Default grid_sample and affine_grid behavior has changed ",
        "to align_corners=False since 1.3.0. Please specify ",
        "align_corners=True if the old behavior is desired. ",
        "See the documentation of grid_sample for details.");
    align_corners = false;
  }

  return torch::grid_sampler(
      input, grid, mode_enum, padding_mode_enum, align_corners.value());
}
} // namespace detail
#endif /* DOXYGEN_SHOULD_SKIP_THIS */

/// See
/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.grid_sample
/// about the exact behavior of this functional.
///
/// See the documentation for `torch::nn::functional::GridSampleFuncOptions`
/// class to learn what optional arguments are supported for this functional.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::grid_sample(input, grid,
/// F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true));
/// ```
inline Tensor grid_sample(
    const Tensor& input,
    const Tensor& grid,
    const GridSampleFuncOptions& options = {}) {
  return detail::grid_sample(
      input,
      grid,
      options.mode(),
      options.padding_mode(),
      options.align_corners());
}

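// Usage sketch; the identity transform below is an illustrative assumption.
// affine_grid builds a sampling grid from a batch of 2x3 affine matrices, and
// grid_sample then resamples the input at those grid locations:
// ```
// namespace F = torch::nn::functional;
// auto img = torch::randn({1, 3, 32, 32});
// auto theta = torch::tensor({{{1.0f, 0.0f, 0.0f},
//                              {0.0f, 1.0f, 0.0f}}}); // identity, shape 1x2x3
// auto grid = F::affine_grid(theta, {1, 3, 32, 32}, /*align_corners=*/false);
// auto out = F::grid_sample(
//     img, grid, F::GridSampleFuncOptions().align_corners(false));
// // out approximately equals img for the identity transform.
// ```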
} // namespace functional
} // namespace nn
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h
ADDED
@@ -0,0 +1,124 @@
#pragma once

#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>

namespace torch {
namespace nn {
namespace init {

using NonlinearityType = std::variant<
    enumtype::kLinear,
    enumtype::kConv1D,
    enumtype::kConv2D,
    enumtype::kConv3D,
    enumtype::kConvTranspose1D,
    enumtype::kConvTranspose2D,
    enumtype::kConvTranspose3D,
    enumtype::kSigmoid,
    enumtype::kTanh,
    enumtype::kReLU,
    enumtype::kLeakyReLU>;

using FanModeType = std::variant<enumtype::kFanIn, enumtype::kFanOut>;

} // namespace init
} // namespace nn

namespace nn {
namespace init {

/// Return the recommended gain value for the given nonlinearity function.
TORCH_API double calculate_gain(
    NonlinearityType nonlinearity,
    double param = 0.01);

/// Fills the given `tensor` with the provided `value` in-place, and returns it.
/// No gradient will be recorded for this operation.
TORCH_API Tensor constant_(Tensor tensor, Scalar value);

/// Fills the given `tensor` with the Dirac delta function in-place, and returns
/// it. No gradient will be recorded for this operation.
TORCH_API Tensor dirac_(Tensor tensor);

/// Fills the given 2-dimensional `matrix` with an identity matrix.
/// No gradient will be recorded for this operation.
TORCH_API Tensor eye_(Tensor matrix);

/// Fills the given 2-dimensional `matrix` with values drawn from a normal
/// distribution parameterized by `mean` and `std`.
/// No gradient will be recorded for this operation.
TORCH_API Tensor normal_(Tensor tensor, double mean = 0, double std = 1);

/// Fills the given `tensor` with ones.
/// No gradient will be recorded for this operation.
TORCH_API Tensor ones_(Tensor tensor);

/// Fills the input `Tensor` with a (semi) orthogonal matrix, as described in
/// "Exact solutions to the nonlinear dynamics of learning in deep linear neural
/// networks" - Saxe, A. et al. (2013). The input tensor must have at least 2
/// dimensions, and for tensors with more than 2 dimensions the trailing
/// dimensions are flattened.
/// No gradient will be recorded for this operation.
TORCH_API Tensor orthogonal_(Tensor tensor, double gain = 1.0);

/// Fills the 2D input `Tensor` as a sparse matrix, where the
/// non-zero elements will be drawn from a centered normal distribution
/// with the given standard deviation `std`, as described in "Deep learning via
/// Hessian-free optimization" - Martens, J. (2010). The `sparsity` is a real
/// value between 0 and 1 that controls the fraction of elements in each column
/// to be set to zero.
/// No gradient will be recorded for this operation.
TORCH_API Tensor sparse_(Tensor tensor, double sparsity, double std = 0.01);

/// Fills the given 2-dimensional `matrix` with values drawn from a uniform
/// distribution parameterized by `low` and `high`.
/// No gradient will be recorded for this operation.
TORCH_API Tensor uniform_(Tensor tensor, double low = 0, double high = 1);

/// Fills the input `Tensor` with values according to the method
/// described in "Delving deep into rectifiers: Surpassing human-level
/// performance on ImageNet classification" - He, K. et al. (2015), using a
/// normal distribution. Also known as He initialization.
/// No gradient will be recorded for this operation.
TORCH_API Tensor kaiming_normal_(
    Tensor tensor,
    double a = 0,
    FanModeType mode = torch::kFanIn,
    NonlinearityType nonlinearity = torch::kLeakyReLU);

/// Fills the input `Tensor` with values according to the method
/// described in "Delving deep into rectifiers: Surpassing human-level
/// performance on ImageNet classification" - He, K. et al. (2015), using a
/// uniform distribution. Also known as He initialization.
/// No gradient will be recorded for this operation.
TORCH_API Tensor kaiming_uniform_(
    Tensor tensor,
    double a = 0,
    FanModeType mode = torch::kFanIn,
    NonlinearityType nonlinearity = torch::kLeakyReLU);

/// Fills the input `Tensor` with values according to the method
/// described in "Understanding the difficulty of training deep feedforward
/// neural networks" - Glorot, X. & Bengio, Y. (2010). Values are scaled by the
/// `gain` parameter. No gradient will be recorded for this operation.
TORCH_API Tensor xavier_normal_(Tensor tensor, double gain = 1.0);

/// Fills the input `Tensor` with values according to the method
/// described in "Understanding the difficulty of training deep feedforward
/// neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform
/// distribution. Values are scaled by the `gain` parameter.
/// No gradient will be recorded for this operation.
TORCH_API Tensor xavier_uniform_(Tensor tensor, double gain = 1.0);

/// Fills the given `tensor` with zeros.
/// No gradient will be recorded for this operation.
TORCH_API Tensor zeros_(Tensor tensor);

TORCH_API std::tuple<int64_t, int64_t> _calculate_fan_in_and_fan_out(
    const Tensor& tensor);

} // namespace init
} // namespace nn
} // namespace torch
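A minimal sketch showing the init helpers declared above (not part of the header; the shapes, gain choices, and variable names are illustrative). Per the doc comments, the helpers mutate the tensor in place and record no gradient:

```
#include <torch/torch.h>

int main() {
  // Weight and bias for a hypothetical 128 -> 256 linear layer.
  auto w = torch::empty({256, 128});
  auto b = torch::empty({256});

  // He initialization for a layer followed by ReLU; no NoGradGuard needed,
  // since these helpers record no gradient by design.
  torch::nn::init::kaiming_normal_(w, /*a=*/0, torch::kFanIn, torch::kReLU);
  torch::nn::init::constant_(b, 0.0);

  // Glorot initialization scaled by the recommended gain for tanh.
  auto w2 = torch::empty({128, 128});
  torch::nn::init::xavier_uniform_(
      w2, torch::nn::init::calculate_gain(torch::kTanh));
}
```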
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h
ADDED
@@ -0,0 +1,702 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/nn/modules/container/any_module_holder.h>
|
4 |
+
#include <torch/nn/modules/container/any_value.h>
|
5 |
+
#include <torch/nn/pimpl.h>
|
6 |
+
#include <torch/ordered_dict.h>
|
7 |
+
#include <torch/serialize/archive.h>
|
8 |
+
#include <torch/types.h>
|
9 |
+
|
10 |
+
#include <ATen/ATen.h>
|
11 |
+
|
12 |
+
#include <functional>
|
13 |
+
#include <iosfwd>
|
14 |
+
#include <map>
|
15 |
+
#include <memory>
|
16 |
+
#include <string>
|
17 |
+
#include <type_traits>
|
18 |
+
|
19 |
+
namespace torch {
|
20 |
+
namespace nn {
|
21 |
+
|
22 |
+
/// The base class for all modules in PyTorch.
|
23 |
+
///
|
24 |
+
/// \rst
|
25 |
+
/// .. note::
|
26 |
+
/// The design and implementation of this class is largely based on the Python
|
27 |
+
/// API. You may want to consult the python documentation for
|
28 |
+
/// :py:class:`pytorch:torch.nn.Module` for further clarification on certain
|
29 |
+
/// methods or behavior.
|
30 |
+
/// \endrst
|
31 |
+
///
|
32 |
+
/// A `Module` is an abstraction over the implementation of some function or
|
33 |
+
/// algorithm, possibly associated with some persistent data. A `Module` may
|
34 |
+
/// contain further `Module`s ("submodules"), each with their own
|
35 |
+
/// implementation, persistent data and further submodules. `Module`s can thus
|
36 |
+
/// be said to form a recursive tree structure. A `Module` is registered as a
|
37 |
+
/// submodule to another `Module` by calling `register_module()`, typically from
|
38 |
+
/// within a parent module's constructor.
|
39 |
+
///
|
40 |
+
/// A distinction is made between three kinds of persistent data that may be
|
41 |
+
/// associated with a `Module`:
|
42 |
+
///
|
43 |
+
/// 1. *Parameters*: tensors that record gradients, typically weights updated
|
44 |
+
/// during the backward step (e.g. the `weight` of a `Linear` module),
|
45 |
+
/// 2. *Buffers*: tensors that do not record gradients, typically updated during
|
46 |
+
/// the forward step, such as running statistics (e.g. `mean` and `variance`
|
47 |
+
/// in the `BatchNorm` module),
|
48 |
+
/// 3. Any additional state, not necessarily tensors, required for the
|
49 |
+
/// implementation or configuration of a `Module`.
|
50 |
+
///
|
51 |
+
/// The first two kinds of state are special in that they may be registered
|
52 |
+
/// with the `Module` system to allow convenient access and batch configuration.
|
53 |
+
/// For example, registered parameters in any `Module` may be iterated over via
|
54 |
+
/// the `parameters()` accessor. Further, changing the data type of a `Module`'s
|
55 |
+
/// registered parameters can be done conveniently via `Module::to()`, e.g.
|
56 |
+
/// `module->to(torch::kCUDA)` to move all parameters to GPU memory. Lastly,
|
57 |
+
/// registered parameters and buffers are handled specially during a `clone()`
|
58 |
+
/// operation, which performs a deepcopy of a cloneable `Module` hierarchy.
|
59 |
+
///
|
60 |
+
/// Parameters are registered with a `Module` via `register_parameter`. Buffers
|
61 |
+
/// are registered separately via `register_buffer`. These methods are part of
|
62 |
+
/// the public API of `Module` and are typically invoked from within a
|
63 |
+
/// concrete `Module`s constructor.
|
64 |
+
class TORCH_API Module : public std::enable_shared_from_this<Module> {
|
65 |
+
public:
|
66 |
+
using ModuleApplyFunction = std::function<void(Module&)>;
|
67 |
+
using ConstModuleApplyFunction = std::function<void(const Module&)>;
|
68 |
+
using NamedModuleApplyFunction =
|
69 |
+
std::function<void(const std::string&, Module&)>;
|
70 |
+
using ConstNamedModuleApplyFunction =
|
71 |
+
std::function<void(const std::string&, const Module&)>;
|
72 |
+
using ModulePointerApplyFunction =
|
73 |
+
std::function<void(const std::shared_ptr<Module>&)>;
|
74 |
+
using NamedModulePointerApplyFunction =
|
75 |
+
std::function<void(const std::string&, const std::shared_ptr<Module>&)>;
|
76 |
+
|
77 |
+
/// Tells the base `Module` about the name of the submodule.
|
78 |
+
explicit Module(std::string name);
|
79 |
+
|
80 |
+
/// Constructs the module without immediate knowledge of the submodule's name.
|
81 |
+
/// The name of the submodule is inferred via RTTI (if possible) the first
|
82 |
+
/// time `.name()` is invoked.
|
83 |
+
Module();
|
84 |
+
Module(const Module&) = default;
|
85 |
+
Module& operator=(const Module&) = default;
|
86 |
+
Module(Module&&) noexcept = default;
|
87 |
+
Module& operator=(Module&&) noexcept = default;
|
88 |
+
|
89 |
+
virtual ~Module() = default;
|
90 |
+
|
91 |
+
/// Returns the name of the `Module`.
|
92 |
+
///
|
93 |
+
/// A `Module` has an associated `name`, which is a string representation of
|
94 |
+
/// the kind of concrete `Module` it represents, such as `"Linear"` for the
|
95 |
+
/// `Linear` module. Under most circumstances, this name is automatically
|
96 |
+
/// inferred via runtime type information (RTTI). In the unusual circumstance
|
97 |
+
/// that you have this feature disabled, you may want to manually name your
|
98 |
+
/// `Module`s by passing the string name to the `Module` base class'
|
99 |
+
/// constructor.
|
100 |
+
const std::string& name() const noexcept;
|
101 |
+
|
102 |
+
/// Performs a recursive deep copy of the module and all its registered
|
103 |
+
/// parameters, buffers and submodules.
|
104 |
+
///
|
105 |
+
/// Optionally, this method sets the current device
|
106 |
+
/// to the one supplied before cloning. If no device is given, each
|
107 |
+
/// parameter and buffer will be moved to the device of its source.
|
108 |
+
///
|
109 |
+
/// \rst
|
110 |
+
/// .. attention::
|
111 |
+
/// Attempting to call the `clone()` method inherited from the base `Module`
|
112 |
+
/// class (the one documented here) will fail. To inherit an actual
|
113 |
+
/// implementation of `clone()`, you must subclass `Cloneable`. `Cloneable`
|
114 |
+
/// is templatized on the concrete module type, and can thus properly copy a
|
115 |
+
/// `Module`. This method is provided on the base class' API solely for an
|
116 |
+
/// easier-to-use polymorphic interface.
|
117 |
+
/// \endrst
|
118 |
+
virtual std::shared_ptr<Module> clone(
|
119 |
+
const optional<Device>& device = nullopt) const;
|
120 |
+
|
121 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
122 |
+
/// The function must accept a `Module&`.
|
123 |
+
///
|
124 |
+
/// \rst
|
125 |
+
/// .. code-block:: cpp
|
126 |
+
/// MyModule module;
|
127 |
+
/// module->apply([](nn::Module& module) {
|
128 |
+
/// std::cout << module.name() << std::endl;
|
129 |
+
/// });
|
130 |
+
/// \endrst
|
131 |
+
void apply(const ModuleApplyFunction& function);
|
132 |
+
|
133 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
134 |
+
/// The function must accept a `const Module&`.
|
135 |
+
///
|
136 |
+
/// \rst
|
137 |
+
/// .. code-block:: cpp
|
138 |
+
/// MyModule module;
|
139 |
+
/// module->apply([](const nn::Module& module) {
|
140 |
+
/// std::cout << module.name() << std::endl;
|
141 |
+
/// });
|
142 |
+
/// \endrst
|
143 |
+
void apply(const ConstModuleApplyFunction& function) const;
|
144 |
+
|
145 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
146 |
+
/// The function must accept a `const std::string&` for the key of the module,
|
147 |
+
/// and a `Module&`. The key of the module itself is the empty string. If
|
148 |
+
/// `name_prefix` is given, it is prepended to every key as
|
149 |
+
/// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
|
150 |
+
///
|
151 |
+
/// \rst
|
152 |
+
/// .. code-block:: cpp
|
153 |
+
/// MyModule module;
|
154 |
+
/// module->apply([](const std::string& key, nn::Module& module) {
|
155 |
+
/// std::cout << key << ": " << module.name() << std::endl;
|
156 |
+
/// });
|
157 |
+
/// \endrst
|
158 |
+
void apply(
|
159 |
+
const NamedModuleApplyFunction& function,
|
160 |
+
const std::string& name_prefix = std::string());
|
161 |
+
|
162 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
163 |
+
/// The function must accept a `const std::string&` for the key of the module,
|
164 |
+
/// and a `const Module&`. The key of the module itself is the empty string.
|
165 |
+
/// If `name_prefix` is given, it is prepended to every key as
|
166 |
+
/// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
|
167 |
+
///
|
168 |
+
/// \rst
|
169 |
+
/// .. code-block:: cpp
|
170 |
+
/// MyModule module;
|
171 |
+
/// module->apply([](const std::string& key, const nn::Module& module) {
|
172 |
+
/// std::cout << key << ": " << module.name() << std::endl;
|
173 |
+
/// });
|
174 |
+
/// \endrst
|
175 |
+
void apply(
|
176 |
+
const ConstNamedModuleApplyFunction& function,
|
177 |
+
const std::string& name_prefix = std::string()) const;
|
178 |
+
|
179 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
180 |
+
/// The function must accept a `const std::shared_ptr<Module>&`.
|
181 |
+
///
|
182 |
+
/// \rst
|
183 |
+
/// .. code-block:: cpp
|
184 |
+
/// MyModule module;
|
185 |
+
/// module->apply([](const std::shared_ptr<nn::Module>& module) {
|
186 |
+
/// std::cout << module->name() << std::endl;
|
187 |
+
/// });
|
188 |
+
/// \endrst
|
189 |
+
void apply(const ModulePointerApplyFunction& function) const;
|
190 |
+
|
191 |
+
/// Applies the `function` to the `Module` and recursively to every submodule.
|
192 |
+
/// The function must accept a `const std::string&` for the key of the module,
|
193 |
+
/// and a `const std::shared_ptr<Module>&`. The key of the module itself is
|
194 |
+
/// the empty string. If `name_prefix` is given, it is prepended to every key
|
195 |
+
/// as
|
196 |
+
/// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
|
197 |
+
///
|
198 |
+
/// \rst
|
199 |
+
/// .. code-block:: cpp
|
200 |
+
/// MyModule module;
|
201 |
+
/// module->apply([](const std::string& key,
|
202 |
+
/// const std::shared_ptr<nn::Module>& module) {
|
203 |
+
/// std::cout << key << ": " << module->name() << std::endl;
|
204 |
+
/// });
|
205 |
+
/// \endrst
|
206 |
+
void apply(
|
207 |
+
const NamedModulePointerApplyFunction& function,
|
208 |
+
const std::string& name_prefix = std::string()) const;
|
209 |
+
|
210 |
+
/// Returns the parameters of this `Module` and if `recurse` is true, also
|
211 |
+
/// recursively of every submodule.
|
212 |
+
std::vector<Tensor> parameters(bool recurse = true) const;
|
213 |
+
|
214 |
+
/// Returns an `OrderedDict` with the parameters of this `Module` along with
|
215 |
+
/// their keys, and if `recurse` is true also recursively of every submodule.
|
216 |
+
OrderedDict<std::string, Tensor> named_parameters(bool recurse = true) const;
|
217 |
+
|
218 |
+
/// Returns the buffers of this `Module` and if `recurse` is true, also
|
219 |
+
/// recursively of every submodule.
|
220 |
+
std::vector<Tensor> buffers(bool recurse = true) const;
|
221 |
+
|
222 |
+
/// Returns an `OrderedDict` with the buffers of this `Module` along with
|
223 |
+
/// their keys, and if `recurse` is true also recursively of every submodule.
|
224 |
+
OrderedDict<std::string, Tensor> named_buffers(bool recurse = true) const;
|
225 |
+
|
226 |
+
/// Returns the submodules of this `Module` (the entire submodule hierarchy)
|
227 |
+
/// and if `include_self` is true, also inserts a `shared_ptr` to this module
|
228 |
+
/// in the first position.
|
229 |
+
///
|
230 |
+
/// \rst
|
231 |
+
/// .. warning::
|
232 |
+
/// Only pass `include_self` as `true` if this `Module` is stored in a
|
233 |
+
/// `shared_ptr`! Otherwise an exception will be thrown. You may still call
|
234 |
+
/// this method with `include_self` set to false if your `Module` is not
|
235 |
+
/// stored in a `shared_ptr`.
|
236 |
+
/// \endrst
|
237 |
+
std::vector<std::shared_ptr<Module>> modules(bool include_self = true) const;
|
238 |
+
|
239 |
+
/// Returns an `OrderedDict` of the submodules of this `Module` (the entire
|
240 |
+
/// submodule hierarchy) and their keys, and if `include_self` is true, also
|
241 |
+
/// inserts a `shared_ptr` to this module in the first position. If
|
242 |
+
/// `name_prefix` is given, it is prepended to every key as
|
243 |
+
/// `<name_prefix>.<key>` (and just `name_prefix` for the module itself).
|
244 |
+
///
|
245 |
+
/// \rst
|
246 |
+
/// .. warning::
|
247 |
+
/// Only pass `include_self` as `true` if this `Module` is stored in a
|
248 |
+
/// `shared_ptr`! Otherwise an exception will be thrown. You may still call
|
249 |
+
/// this method with `include_self` set to false if your `Module` is not
|
250 |
+
/// stored in a `shared_ptr`.
|
251 |
+
/// \endrst
|
252 |
+
OrderedDict<std::string, std::shared_ptr<Module>> named_modules(
|
253 |
+
const std::string& name_prefix = std::string(),
|
254 |
+
bool include_self = true) const;
|
255 |
+
|
256 |
+
/// Returns the direct submodules of this `Module`.
|
257 |
+
std::vector<std::shared_ptr<Module>> children() const;
|
258 |
+
|
259 |
+
/// Returns an `OrderedDict` of the direct submodules of this `Module` and
|
260 |
+
/// their keys.
|
261 |
+
OrderedDict<std::string, std::shared_ptr<Module>> named_children() const;
|
262 |
+
|
263 |
+
/// Enables "training" mode.
|
264 |
+
virtual void train(bool on = true);
|
265 |
+
|
266 |
+
/// Calls train(false) to enable "eval" mode.
|
267 |
+
/// Do not override this method, override `train()` instead.
|
268 |
+
void eval();
|
269 |
+
|
270 |
+
/// True if the module is in training mode.
|
271 |
+
///
|
272 |
+
/// Every `Module` has a boolean associated with it that determines whether
|
273 |
+
/// the `Module` is currently in *training* mode (set via `.train()`) or in
|
274 |
+
/// *evaluation* (inference) mode (set via `.eval()`). This property is
|
275 |
+
/// exposed via `is_training()`, and may be used by the implementation of a
|
276 |
+
/// concrete module to modify its runtime behavior. See the `BatchNorm` or
|
277 |
+
/// `Dropout` modules for examples of `Module`s that use different code paths
|
278 |
+
/// depending on this property.
|
279 |
+
virtual bool is_training() const noexcept;
|
280 |
+
|
281 |
+
/// Recursively casts all parameters to the given `dtype` and `device`.
|
282 |
+
///
|
283 |
+
/// If `non_blocking` is true and the source is in pinned memory and
|
284 |
+
/// destination is on the GPU or vice versa, the copy is performed
|
285 |
+
/// asynchronously with respect to the host. Otherwise, the argument has no
|
286 |
+
/// effect.
|
287 |
+
virtual void to(
|
288 |
+
torch::Device device,
|
289 |
+
torch::Dtype dtype,
|
290 |
+
bool non_blocking = false);
|
291 |
+
|
292 |
+
/// Recursively casts all parameters to the given dtype.
|
293 |
+
///
|
294 |
+
/// If `non_blocking` is true and the source is in pinned memory and
|
295 |
+
/// destination is on the GPU or vice versa, the copy is performed
|
296 |
+
/// asynchronously with respect to the host. Otherwise, the argument has no
|
297 |
+
/// effect.
|
298 |
+
virtual void to(torch::Dtype dtype, bool non_blocking = false);
|
299 |
+
|
300 |
+
/// Recursively moves all parameters to the given device.
|
301 |
+
///
|
302 |
+
/// If `non_blocking` is true and the source is in pinned memory and
|
303 |
+
/// destination is on the GPU or vice versa, the copy is performed
|
304 |
+
/// asynchronously with respect to the host. Otherwise, the argument has no
|
305 |
+
/// effect.
|
306 |
+
virtual void to(torch::Device device, bool non_blocking = false);
|
307 |
+
|
308 |
+
/// Recursively zeros out the `grad` value of each registered parameter.
|
309 |
+
virtual void zero_grad(bool set_to_none = true);
|
310 |
+
|
311 |
+
/// Attempts to cast this `Module` to the given `ModuleType`.
|
312 |
+
///
|
313 |
+
/// This method is useful when calling `apply()`.
|
314 |
+
/// \rst
|
315 |
+
/// .. code-block:: cpp
|
316 |
+
///
|
317 |
+
/// void initialize_weights(nn::Module& module) {
|
318 |
+
/// torch::NoGradGuard no_grad;
|
319 |
+
/// if (auto* linear = module.as<nn::Linear>()) {
|
320 |
+
/// linear->weight.normal_(0.0, 0.02);
|
321 |
+
/// }
|
322 |
+
/// }
|
323 |
+
///
|
324 |
+
/// MyModule module;
|
325 |
+
/// module->apply(initialize_weights);
|
326 |
+
/// \endrst
|
327 |
+
template <typename ModuleType>
|
328 |
+
typename ModuleType::ContainedType* as() noexcept;
|
329 |
+
|
330 |
+
/// Attempts to cast this `Module` to the given `ModuleType`.
|
331 |
+
///
|
332 |
+
/// This method is useful when calling `apply()`.
|
333 |
+
/// \rst
|
334 |
+
/// .. code-block:: cpp
|
335 |
+
/// void initialize_weights(nn::Module& module) {
|
336 |
+
/// torch::NoGradGuard no_grad;
|
337 |
+
/// if (auto* linear = module.as<nn::Linear>()) {
|
338 |
+
/// linear->weight.normal_(0.0, 0.02);
|
339 |
+
/// }
|
340 |
+
/// }
|
341 |
+
///
|
342 |
+
/// MyModule module;
|
343 |
+
/// module->apply(initialize_weights);
|
344 |
+
/// \endrst
|
345 |
+
template <typename ModuleType>
|
346 |
+
const typename ModuleType::ContainedType* as() const noexcept;
|
347 |
+
|
348 |
+
/// Attempts to cast this `Module` to the given `ModuleType`.
|
349 |
+
///
|
350 |
+
/// This method is useful when calling `apply()`.
|
351 |
+
/// \rst
|
352 |
+
/// .. code-block:: cpp
|
353 |
+
///
|
354 |
+
/// void initialize_weights(nn::Module& module) {
|
355 |
+
/// torch::NoGradGuard no_grad;
|
356 |
+
/// if (auto* linear = module.as<nn::Linear>()) {
|
357 |
+
/// linear->weight.normal_(0.0, 0.02);
|
358 |
+
/// }
|
359 |
+
/// }
|
360 |
+
///
|
361 |
+
/// MyModule module;
|
362 |
+
/// module.apply(initialize_weights);
|
363 |
+
/// \endrst
|
364 |
+
template <
|
365 |
+
typename ModuleType,
|
366 |
+
typename = torch::detail::disable_if_module_holder_t<ModuleType>>
|
367 |
+
ModuleType* as() noexcept;
|
368 |
+
|
369 |
+
/// Attempts to cast this `Module` to the given `ModuleType`.
|
370 |
+
///
|
371 |
+
/// This method is useful when calling `apply()`.
|
372 |
+
/// \rst
|
373 |
+
/// .. code-block:: cpp
|
374 |
+
///
|
375 |
+
/// void initialize_weights(nn::Module& module) {
|
376 |
+
/// torch::NoGradGuard no_grad;
|
377 |
+
/// if (auto* linear = module.as<nn::Linear>()) {
|
378 |
+
/// linear->weight.normal_(0.0, 0.02);
|
379 |
+
/// }
|
380 |
+
/// }
|
381 |
+
///
|
382 |
+
/// MyModule module;
|
383 |
+
/// module.apply(initialize_weights);
|
384 |
+
/// \endrst
|
385 |
+
template <
|
386 |
+
typename ModuleType,
|
387 |
+
typename = torch::detail::disable_if_module_holder_t<ModuleType>>
|
388 |
+
const ModuleType* as() const noexcept;
|
389 |
+
|
390 |
+
/// Serializes the `Module` into the given `OutputArchive`.
|
391 |
+
///
|
392 |
+
/// If the `Module` contains unserializable submodules (e.g.
|
393 |
+
/// `nn::Functional`), those submodules are skipped when serializing.
|
394 |
+
virtual void save(serialize::OutputArchive& archive) const;
|
395 |
+
|
396 |
+
/// Deserializes the `Module` from the given `InputArchive`.
|
397 |
+
///
|
398 |
+
/// If the `Module` contains unserializable submodules (e.g.
|
399 |
+
/// `nn::Functional`), we don't check the existence of those submodules in the
|
400 |
+
/// `InputArchive` when deserializing.
|
401 |
+
virtual void load(serialize::InputArchive& archive);
|
402 |
+
|
403 |
+
/// Streams a pretty representation of the `Module` into the given `stream`.
|
404 |
+
/// By default, this representation will be the name of the module (taken from
|
405 |
+
/// `name()`), followed by a recursive pretty print of all of the `Module`'s
|
406 |
+
/// submodules.
|
407 |
+
///
|
408 |
+
/// Override this method to change the pretty print. The input
|
409 |
+
/// `stream` should be returned from the method, to allow easy chaining.
|
410 |
+
virtual void pretty_print(std::ostream& stream) const;
|
411 |
+
|
412 |
+
/// Returns whether the `Module` is serializable.
|
413 |
+
virtual bool is_serializable() const;
|
414 |
+
|
415 |
+
/// Registers a parameter with this `Module`.
|
416 |
+
///
|
417 |
+
/// A parameter should be any gradient-recording tensor used in the
|
418 |
+
/// implementation of your `Module`. Registering it makes it available to
|
419 |
+
/// methods such as `parameters()`, `clone()` or `to().`
|
420 |
+
///
|
421 |
+
/// Note that registering an undefined Tensor (e.g.
|
422 |
+
/// `module.register_parameter("param", Tensor())`) is allowed, and is
|
423 |
+
/// equivalent to `module.register_parameter("param", None)` in Python API.
|
424 |
+
///
|
425 |
+
/// \rst
|
426 |
+
/// .. code-block:: cpp
|
427 |
+
///
|
428 |
+
/// MyModule::MyModule() {
|
429 |
+
/// weight_ = register_parameter("weight", torch::randn({A, B}));
|
430 |
+
/// }
|
431 |
+
/// \endrst
|
432 |
+
Tensor& register_parameter(
|
433 |
+
std::string name,
|
434 |
+
Tensor tensor,
|
435 |
+
bool requires_grad = true);
|
436 |
+
|
437 |
+
/// Registers a buffer with this `Module`.
|
438 |
+
///
|
439 |
+
/// A buffer is intended to be state in your module that does not record
|
440 |
+
/// gradients, such as running statistics. Registering it makes it available
|
441 |
+
/// to methods such as `buffers()`, `clone()` or `to().
|
442 |
+
///
|
443 |
+
/// \rst
|
444 |
+
/// .. code-block:: cpp
|
445 |
+
///
|
446 |
+
/// MyModule::MyModule() {
|
447 |
+
/// mean_ = register_buffer("mean", torch::empty({num_features_}));
|
448 |
+
/// }
|
449 |
+
/// \endrst
|
450 |
+
Tensor& register_buffer(std::string name, Tensor tensor);
|
451 |
+
|
452 |
+
/// Registers a submodule with this `Module`.
|
453 |
+
///
|
454 |
+
/// Registering a module makes it available to methods such as `modules()`,
|
455 |
+
/// `clone()` or `to()`.
|
456 |
+
///
|
457 |
+
/// \rst
|
458 |
+
/// .. code-block:: cpp
|
459 |
+
///
|
460 |
+
/// MyModule::MyModule() {
|
461 |
+
/// submodule_ = register_module("linear", torch::nn::Linear(3, 4));
|
462 |
+
/// }
|
463 |
+
/// \endrst
|
464 |
+
template <typename ModuleType>
|
465 |
+
std::shared_ptr<ModuleType> register_module(
|
466 |
+
std::string name,
|
467 |
+
std::shared_ptr<ModuleType> module);
|
468 |
+
|
469 |
+
/// Registers a submodule with this `Module`.
|
470 |
+
///
|
471 |
+
/// This method deals with `ModuleHolder`s.
|
472 |
+
///
|
473 |
+
/// Registering a module makes it available to methods such as `modules()`,
|
474 |
+
/// `clone()` or `to()`.
|
475 |
+
///
|
476 |
+
/// \rst
|
477 |
+
/// .. code-block:: cpp
|
478 |
+
///
|
479 |
+
/// MyModule::MyModule() {
|
480 |
+
/// submodule_ = register_module("linear", torch::nn::Linear(3, 4));
|
481 |
+
/// }
|
482 |
+
/// \endrst
|
483 |
+
template <typename ModuleType>
|
484 |
+
std::shared_ptr<ModuleType> register_module(
|
485 |
+
std::string name,
|
486 |
+
ModuleHolder<ModuleType> module_holder);
|
487 |
+
|
488 |
+
/// Replaces a registered submodule with this `Module`.
|
489 |
+
///
|
490 |
+
/// This takes care of the registration, if you used submodule members, you
|
491 |
+
/// should
|
492 |
+
// assign the submodule as well, i.e. use as
|
493 |
+
/// module->submodule_ = module->replace_module("linear",
|
494 |
+
/// torch::nn::Linear(3, 4));
|
495 |
+
/// It only works when a module of the name is already registered.
|
496 |
+
///
|
497 |
+
/// This is useful for replacing a module after initialization, e.g.
|
498 |
+
/// for finetuning.
|
499 |
+
template <typename ModuleType>
|
500 |
+
std::shared_ptr<ModuleType> replace_module(
|
501 |
+
const std::string& name,
|
502 |
+
std::shared_ptr<ModuleType> module);
|
503 |
+
|
504 |
+
/// Replaces a registered submodule with this `Module`.
|
505 |
+
/// This method deals with `ModuleHolder`s.
|
506 |
+
///
|
507 |
+
/// This takes care of the registration, if you used submodule members, you
|
508 |
+
/// should
|
509 |
+
// assign the submodule as well, i.e. use as
|
510 |
+
/// module->submodule_ = module->replace_module("linear", linear_holder);
|
511 |
+
/// It only works when a module of the name is already registered.
|
512 |
+
///
|
513 |
+
/// This is useful for replacing a module after initialization, e.g.
|
514 |
+
/// for finetuning.
|
515 |
+
template <typename ModuleType>
|
516 |
+
std::shared_ptr<ModuleType> replace_module(
|
517 |
+
const std::string& name,
|
518 |
+
ModuleHolder<ModuleType> module_holder);
|
519 |
+
|
520 |
+
/// Unregisters a submodule from this `Module`. If there is no such module
|
521 |
+
/// with `name` an exception is thrown.
|
522 |
+
void unregister_module(const std::string& name);
|
523 |
+
|
524 |
+
protected:
|
525 |
+
/// The following three functions allow a module with default arguments in its
|
526 |
+
/// forward method to be used in a Sequential module.
|
527 |
+
/// You should NEVER override these functions manually. Instead, you should
|
528 |
+
/// use the `FORWARD_HAS_DEFAULT_ARGS` macro.
|
529 |
+
virtual bool _forward_has_default_args() {
|
530 |
+
return false;
|
531 |
+
}
|
532 |
+
|
533 |
+
virtual unsigned int _forward_num_required_args() {
|
534 |
+
TORCH_CHECK(
|
535 |
+
false,
|
536 |
+
"torch::nn::Module subclass that has default arguments in `forward` method ",
|
537 |
+
"must override `_forward_num_required_args` method. Please use ",
|
538 |
+
"`FORWARD_HAS_DEFAULT_ARGS` macro to do so.");
|
539 |
+
}
|
540 |
+
|
541 |
+
virtual std::vector<AnyValue> _forward_populate_default_args(
|
542 |
+
std::vector<AnyValue>&& arguments) {
|
543 |
+
TORCH_CHECK(
|
544 |
+
false,
|
545 |
+
"torch::nn::Module subclass that has default arguments in `forward` method ",
|
546 |
+
"must override `_forward_populate_default_args` method. Please use ",
|
547 |
+
"`FORWARD_HAS_DEFAULT_ARGS` macro to do so.");
|
548 |
+
}
|
549 |
+
|
550 |
+
/// The registered parameters of this `Module`.
|
551 |
+
/// Inorder to access parameters_ in ParameterDict and ParameterList
|
552 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
553 |
+
OrderedDict<std::string, Tensor> parameters_;
|
554 |
+
|
555 |
+
private:
|
556 |
+
// Friend classes.
|
557 |
+
|
558 |
+
template <typename Derived>
|
559 |
+
friend class Cloneable;
|
560 |
+
|
561 |
+
template <typename ModuleType, typename... ArgumentTypes>
|
562 |
+
friend struct AnyModuleHolder;
|
563 |
+
|
564 |
+
/// Pretty prints the given `Module` into the `ostream`.
|
565 |
+
TORCH_API friend std::ostream& operator<<(
|
566 |
+
std::ostream& stream,
|
567 |
+
const nn::Module& module);
|
568 |
+
|
569 |
+
// data parallel using this method to configure gradient edges during the
|
570 |
+
// replicate step.
|
571 |
+
template <typename ModuleType>
|
572 |
+
friend void replicate_grad_edges(
|
573 |
+
const std::shared_ptr<Module>& module,
|
574 |
+
const std::vector<std::shared_ptr<ModuleType>>& replicas,
|
575 |
+
const std::vector<Device>& devices);
|
576 |
+
|
577 |
+
// Private methods.
|
578 |
+
|
579 |
+
/// Used in the implementation of `Cloneable`.
|
580 |
+
virtual void clone_(Module& other, const optional<Device>& device);
|
581 |
+
|
582 |
+
/// The implementation of the various `to()` methods.
|
583 |
+
template <typename... Ts>
|
584 |
+
void to_impl(Ts&&... ts);
|
585 |
+
|
586 |
+
/// Implements pretty printing the module hierarchy.
|
587 |
+
void pretty_print_recursive(
|
588 |
+
std::ostream& stream,
|
589 |
+
const std::string& indentation) const;
|
590 |
+
|
591 |
+
/// Applies the `function` to every submodule recursively, starting at this
|
592 |
+
/// `Module`'s children (thus not including the module itself).
|
593 |
+
void apply_to_submodules(
|
594 |
+
const NamedModulePointerApplyFunction& function,
|
595 |
+
const std::string& name_prefix = std::string()) const;
|
596 |
+
|
597 |
+
/// Returns a shared_ptr to `this` in a safe (checked) way.
|
598 |
+
std::shared_ptr<Module> shared_from_this_checked() const;
|
599 |
+
|
600 |
+
/// The registered buffers of this `Module`.
|
601 |
+
OrderedDict<std::string, Tensor> buffers_;
|
602 |
+
|
603 |
+
/// The registered (direct) submodules of this `Module`.
|
604 |
+
OrderedDict<std::string, std::shared_ptr<Module>> children_;
|
605 |
+
|
606 |
+
/// The module's name (e.g. "LSTM").
|
607 |
+
mutable optional<std::string> name_;
|
608 |
+
|
609 |
+
/// Whether the module is in training mode.
|
610 |
+
bool is_training_{true};
|
611 |
+
};
|
612 |
+
|
613 |
+
/// Serialize a `Module` pointer into an `OutputArchive`.
|
614 |
+
TORCH_API serialize::OutputArchive& operator<<(
|
615 |
+
serialize::OutputArchive& archive,
|
616 |
+
const std::shared_ptr<nn::Module>& module);
|
617 |
+
|
618 |
+
/// Deserializes a `Module` from an `InputArchive`.
|
619 |
+
TORCH_API serialize::InputArchive& operator>>(
|
620 |
+
serialize::InputArchive& archive,
|
621 |
+
const std::shared_ptr<nn::Module>& module);
|
622 |
+
|
623 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nn::Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
624 |
+
|
625 |
+
template <typename ModuleType>
|
626 |
+
typename ModuleType::ContainedType* Module::as() noexcept {
|
627 |
+
// Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for
|
628 |
+
// `Linear`, since `LinearImpl` inherits `nn::Module`.
|
629 |
+
return as<typename ModuleType::ContainedType>();
|
630 |
+
}
|
631 |
+
|
632 |
+
template <typename ModuleType>
|
633 |
+
const typename ModuleType::ContainedType* Module::as() const noexcept {
|
634 |
+
// Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for
|
635 |
+
// `Linear`, since `LinearImpl` inherits `nn::Module`.
|
636 |
+
return as<typename ModuleType::ContainedType>();
|
637 |
+
}
|
638 |
+
|
639 |
+
template <typename ModuleType, typename>
|
640 |
+
ModuleType* Module::as() noexcept {
|
641 |
+
return dynamic_cast<ModuleType*>(this);
|
642 |
+
}
|
643 |
+
|
644 |
+
template <typename ModuleType, typename>
|
645 |
+
const ModuleType* Module::as() const noexcept {
|
646 |
+
return dynamic_cast<const ModuleType*>(this);
|
647 |
+
}
|
648 |
+
|
649 |
+
template <typename ModuleType>
|
650 |
+
std::shared_ptr<ModuleType> Module::register_module(
|
651 |
+
std::string name,
|
652 |
+
std::shared_ptr<ModuleType> module) {
|
653 |
+
TORCH_CHECK(!name.empty(), "Submodule name must not be empty");
|
654 |
+
TORCH_CHECK(
|
655 |
+
name.find('.') == std::string::npos,
|
656 |
+
"Submodule name must not contain a dot (got '",
|
657 |
+
name,
|
658 |
+
"')");
|
659 |
+
auto& base_module = children_.insert(std::move(name), std::move(module));
|
660 |
+
return std::dynamic_pointer_cast<ModuleType>(base_module);
|
661 |
+
}
|
662 |
+
|
663 |
+
template <typename ModuleType>
|
664 |
+
std::shared_ptr<ModuleType> Module::register_module(
|
665 |
+
std::string name,
|
666 |
+
ModuleHolder<ModuleType> module_holder) {
|
667 |
+
return register_module(std::move(name), module_holder.ptr());
|
668 |
+
}
|
669 |
+
|
670 |
+
template <typename ModuleType>
|
671 |
+
std::shared_ptr<ModuleType> Module::replace_module(
|
672 |
+
const std::string& name,
|
673 |
+
std::shared_ptr<ModuleType> module) {
|
674 |
+
auto& base_module = (children_[name] = std::move(module));
|
675 |
+
return std::dynamic_pointer_cast<ModuleType>(base_module);
|
676 |
+
}
|
677 |
+
|
678 |
+
template <typename ModuleType>
|
679 |
+
std::shared_ptr<ModuleType> Module::replace_module(
|
680 |
+
const std::string& name,
|
681 |
+
ModuleHolder<ModuleType> module_holder) {
|
682 |
+
return replace_module(name, module_holder.ptr());
|
683 |
+
}
|
684 |
+
|
685 |
+
template <typename... Ts>
|
686 |
+
void Module::to_impl(Ts&&... ts) {
|
687 |
+
// First call `to()` on every child module.
|
688 |
+
for (auto& child : children_) {
|
689 |
+
child.value()->to(ts...);
|
690 |
+
}
|
691 |
+
// Then move every parameter to the new dtype/device.
|
692 |
+
for (auto& parameter : named_parameters(/*recurse=*/false)) {
|
693 |
+
parameter->set_data(autograd::Variable(*parameter).to(ts...));
|
694 |
+
}
|
695 |
+
// Then move every buffer to the new dtype/device.
|
696 |
+
for (auto& buffer : named_buffers(/*recurse=*/false)) {
|
697 |
+
buffer->set_data(autograd::Variable(*buffer).to(ts...));
|
698 |
+
}
|
699 |
+
}
|
700 |
+
|
701 |
+
} // namespace nn
|
702 |
+
} // namespace torch
|
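The module.h diff above declares the `torch::nn::Module` base class: registration of parameters, buffers and submodules, the `apply()` visitors, `to()`, `clone()`, and serialization hooks. A minimal, hypothetical subclass sketch following the patterns shown in its doc comments; the names `NetImpl`/`Net` and the layer sizes are illustrative only:

```
#include <torch/torch.h>
#include <iostream>

// Hypothetical module: two Linear layers plus an extra registered parameter.
struct NetImpl : torch::nn::Module {
  NetImpl() {
    fc1 = register_module("fc1", torch::nn::Linear(784, 64));
    fc2 = register_module("fc2", torch::nn::Linear(64, 10));
    scale = register_parameter("scale", torch::ones({1}));
  }

  torch::Tensor forward(torch::Tensor x) {
    return fc2(torch::relu(fc1(x))) * scale;
  }

  torch::nn::Linear fc1{nullptr}, fc2{nullptr};
  torch::Tensor scale;
};
TORCH_MODULE(Net); // generates the `Net` ModuleHolder wrapper around NetImpl

int main() {
  Net net;
  // Recursively visit every submodule, as documented for `apply()`.
  net->apply(
      [](torch::nn::Module& m) { std::cout << m.name() << std::endl; });
  // Cast all registered parameters and buffers in one call via `to()`.
  net->to(torch::kFloat64);
  std::cout << "parameters: " << net->parameters().size() << std::endl;
}
```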
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h
ADDED
@@ -0,0 +1,36 @@
#pragma once

// Common
#include <torch/nn/modules/common.h>

// Containers
#include <torch/nn/modules/container/any.h>
#include <torch/nn/modules/container/functional.h>
#include <torch/nn/modules/container/moduledict.h>
#include <torch/nn/modules/container/modulelist.h>
#include <torch/nn/modules/container/named_any.h>
#include <torch/nn/modules/container/parameterdict.h>
#include <torch/nn/modules/container/parameterlist.h>
#include <torch/nn/modules/container/sequential.h>

// Layers
#include <torch/nn/modules/activation.h>
#include <torch/nn/modules/adaptive.h>
#include <torch/nn/modules/batchnorm.h>
#include <torch/nn/modules/conv.h>
#include <torch/nn/modules/distance.h>
#include <torch/nn/modules/dropout.h>
#include <torch/nn/modules/embedding.h>
#include <torch/nn/modules/fold.h>
#include <torch/nn/modules/instancenorm.h>
#include <torch/nn/modules/linear.h>
#include <torch/nn/modules/loss.h>
#include <torch/nn/modules/normalization.h>
#include <torch/nn/modules/padding.h>
#include <torch/nn/modules/pixelshuffle.h>
#include <torch/nn/modules/pooling.h>
#include <torch/nn/modules/rnn.h>
#include <torch/nn/modules/transformer.h>
#include <torch/nn/modules/transformercoder.h>
#include <torch/nn/modules/transformerlayer.h>
#include <torch/nn/modules/upsampling.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h
ADDED
@@ -0,0 +1,18 @@
#pragma once

#include <torch/nn/options/batchnorm.h>
#include <torch/nn/options/conv.h>
#include <torch/nn/options/dropout.h>
#include <torch/nn/options/fold.h>
#include <torch/nn/options/linear.h>
#include <torch/nn/options/loss.h>
#include <torch/nn/options/normalization.h>
#include <torch/nn/options/padding.h>
#include <torch/nn/options/pixelshuffle.h>
#include <torch/nn/options/pooling.h>
#include <torch/nn/options/rnn.h>
#include <torch/nn/options/transformer.h>
#include <torch/nn/options/transformercoder.h>
#include <torch/nn/options/transformerlayer.h>
#include <torch/nn/options/upsampling.h>
#include <torch/nn/options/vision.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h
ADDED
@@ -0,0 +1,714 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/arg.h>
|
4 |
+
#include <torch/csrc/Export.h>
|
5 |
+
#include <torch/enum.h>
|
6 |
+
#include <torch/types.h>
|
7 |
+
|
8 |
+
namespace torch {
|
9 |
+
namespace nn {
|
10 |
+
|
11 |
+
/// Options for the `ELU` module.
|
12 |
+
///
|
13 |
+
/// Example:
|
14 |
+
/// ```
|
15 |
+
/// ELU model(ELUOptions().alpha(42.42).inplace(true));
|
16 |
+
/// ```
|
17 |
+
struct TORCH_API ELUOptions {
|
18 |
+
/// The `alpha` value for the ELU formulation. Default: 1.0
|
19 |
+
TORCH_ARG(double, alpha) = 1.0;
|
20 |
+
|
21 |
+
/// can optionally do the operation in-place. Default: False
|
22 |
+
TORCH_ARG(bool, inplace) = false;
|
23 |
+
};
|
24 |
+
|
25 |
+
namespace functional {
|
26 |
+
/// Options for `torch::nn::functional::elu`.
|
27 |
+
///
|
28 |
+
/// See the documentation for `torch::nn::ELUOptions` class to learn what
|
29 |
+
/// arguments are supported.
|
30 |
+
///
|
31 |
+
/// Example:
|
32 |
+
/// ```
|
33 |
+
/// namespace F = torch::nn::functional;
|
34 |
+
/// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
|
35 |
+
/// ```
|
36 |
+
using ELUFuncOptions = ELUOptions;
|
37 |
+
} // namespace functional
|
38 |
+
|
39 |
+
// ============================================================================
|
40 |
+
|
41 |
+
/// Options for the `SELU` module.
|
42 |
+
///
|
43 |
+
/// Example:
|
44 |
+
/// ```
|
45 |
+
/// SELU model(SELUOptions().inplace(true));
|
46 |
+
/// ```
|
47 |
+
struct TORCH_API SELUOptions {
|
48 |
+
/* implicit */ SELUOptions(bool inplace = false);
|
49 |
+
|
50 |
+
/// can optionally do the operation in-place. Default: False
|
51 |
+
TORCH_ARG(bool, inplace);
|
52 |
+
};
|
53 |
+
|
54 |
+
namespace functional {
|
55 |
+
/// Options for `torch::nn::functional::selu`.
|
56 |
+
///
|
57 |
+
/// See the documentation for `torch::nn::SELUOptions` class to learn what
|
58 |
+
/// arguments are supported.
|
59 |
+
///
|
60 |
+
/// Example:
|
61 |
+
/// ```
|
62 |
+
/// namespace F = torch::nn::functional;
|
63 |
+
/// F::selu(input, F::SELUFuncOptions(false));
|
64 |
+
/// ```
|
65 |
+
using SELUFuncOptions = SELUOptions;
|
66 |
+
} // namespace functional
|
67 |
+
|
68 |
+
// ============================================================================
|
69 |
+
|
70 |
+
/// Options for the `GLU` module.
|
71 |
+
///
|
72 |
+
/// Example:
|
73 |
+
/// ```
|
74 |
+
/// GLU model(GLUOptions(1));
|
75 |
+
/// ```
|
76 |
+
struct TORCH_API GLUOptions {
|
77 |
+
/* implicit */ GLUOptions(int64_t dim = -1);
|
78 |
+
|
79 |
+
/// the dimension on which to split the input. Default: -1
|
80 |
+
TORCH_ARG(int64_t, dim);
|
81 |
+
};
|
82 |
+
|
83 |
+
namespace functional {
|
84 |
+
/// Options for `torch::nn::functional::glu`.
|
85 |
+
///
|
86 |
+
/// See the documentation for `torch::nn::GLUOptions` class to learn what
|
87 |
+
/// arguments are supported.
|
88 |
+
///
|
89 |
+
/// Example:
|
90 |
+
/// ```
|
91 |
+
/// namespace F = torch::nn::functional;
|
92 |
+
/// F::glu(input, GLUFuncOptions(1));
|
93 |
+
/// ```
|
94 |
+
using GLUFuncOptions = GLUOptions;
|
95 |
+
} // namespace functional
|
96 |
+
|
97 |
+
// ============================================================================
|
98 |
+
|
99 |
+
/// Options for the `GELU` module.
|
100 |
+
///
|
101 |
+
/// Example:
|
102 |
+
/// ```
|
103 |
+
/// GELU model(GELUOptions().approximate("none"));
|
104 |
+
/// ```
|
105 |
+
struct TORCH_API GELUOptions {
|
106 |
+
/// Specifies the approximation to apply to the output.
|
107 |
+
TORCH_ARG(std::string, approximate) = "none";
|
108 |
+
};
|
109 |
+
|
110 |
+
namespace functional {
|
111 |
+
/// Options for `torch::nn::functional::gelu`.
|
112 |
+
///
|
113 |
+
/// See the documentation for `torch::nn::GELUOptions` class to learn what
|
114 |
+
/// arguments are supported.
|
115 |
+
///
|
116 |
+
/// Example:
|
117 |
+
/// ```
|
118 |
+
/// namespace F = torch::nn::functional;
|
119 |
+
/// F::gelu(input, F::GELUFuncOptions().approximate("none"));
|
120 |
+
/// ```
|
121 |
+
using GELUFuncOptions = GELUOptions;
|
122 |
+
} // namespace functional
|
123 |
+
|
124 |
+
// ============================================================================
|
125 |
+
|
126 |
+
/// Options for the `Hardshrink` module.
|
127 |
+
///
|
128 |
+
/// Example:
|
129 |
+
/// ```
|
130 |
+
/// Hardshrink model(HardshrinkOptions().lambda(42.42));
|
131 |
+
/// ```
|
132 |
+
struct TORCH_API HardshrinkOptions {
|
133 |
+
/* implicit */ HardshrinkOptions(double lambda = 0.5);
|
134 |
+
|
135 |
+
/// the `lambda` value for the Hardshrink formulation. Default: 0.5
|
136 |
+
TORCH_ARG(double, lambda);
|
137 |
+
};
|
138 |
+
|
139 |
+
namespace functional {
|
140 |
+
/// Options for `torch::nn::functional::hardshrink`.
|
141 |
+
///
|
142 |
+
/// See the documentation for `torch::nn::HardshrinkOptions` class to learn what
|
143 |
+
/// arguments are supported.
|
144 |
+
///
|
145 |
+
/// Example:
|
146 |
+
/// ```
|
147 |
+
/// namespace F = torch::nn::functional;
|
148 |
+
/// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
|
149 |
+
/// ```
|
150 |
+
using HardshrinkFuncOptions = HardshrinkOptions;
|
151 |
+
} // namespace functional
|
152 |
+
|
153 |
+
// ============================================================================
|
154 |
+
|
155 |
+
/// Options for the `Hardtanh` module.
|
156 |
+
///
|
157 |
+
/// Example:
|
158 |
+
/// ```
|
159 |
+
/// Hardtanh
|
160 |
+
/// model(HardtanhOptions().min_val(-42.42).max_val(0.42).inplace(true));
|
161 |
+
/// ```
|
162 |
+
struct TORCH_API HardtanhOptions {
|
163 |
+
/// minimum value of the linear region range. Default: -1
|
164 |
+
TORCH_ARG(double, min_val) = -1.0;
|
165 |
+
|
166 |
+
/// maximum value of the linear region range. Default: 1
|
167 |
+
TORCH_ARG(double, max_val) = 1.0;
|
168 |
+
|
169 |
+
/// can optionally do the operation in-place. Default: False
|
170 |
+
TORCH_ARG(bool, inplace) = false;
|
171 |
+
};
|
172 |
+
|
173 |
+
namespace functional {
|
174 |
+
/// Options for `torch::nn::functional::hardtanh`.
|
175 |
+
///
|
176 |
+
/// See the documentation for `torch::nn::HardtanhOptions` class to learn what
|
177 |
+
/// arguments are supported.
|
178 |
+
///
|
179 |
+
/// Example:
|
180 |
+
/// ```
|
181 |
+
/// namespace F = torch::nn::functional;
|
182 |
+
/// F::hardtanh(x,
|
183 |
+
/// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
|
184 |
+
/// ```
|
185 |
+
using HardtanhFuncOptions = HardtanhOptions;
|
186 |
+
} // namespace functional
|
187 |
+
|
188 |
+
// ============================================================================
|
189 |
+
|
190 |
+
/// Options for the `LeakyReLU` module.
|
191 |
+
///
|
192 |
+
/// Example:
|
193 |
+
/// ```
|
194 |
+
/// LeakyReLU model(LeakyReLUOptions().negative_slope(0.42).inplace(true));
|
195 |
+
/// ```
|
196 |
+
struct TORCH_API LeakyReLUOptions {
|
197 |
+
/// Controls the angle of the negative slope. Default: 1e-2
|
198 |
+
TORCH_ARG(double, negative_slope) = 1e-2;
|
199 |
+
|
200 |
+
/// can optionally do the operation in-place. Default: False
|
201 |
+
TORCH_ARG(bool, inplace) = false;
|
202 |
+
};
|
203 |
+
|
204 |
+
namespace functional {
|
205 |
+
/// Options for `torch::nn::functional::leaky_relu`.
|
206 |
+
///
|
207 |
+
/// See the documentation for `torch::nn::LeakyReLUOptions` class to learn what
|
208 |
+
/// arguments are supported.
|
209 |
+
///
|
210 |
+
/// Example:
|
211 |
+
/// ```
|
212 |
+
/// namespace F = torch::nn::functional;
|
213 |
+
/// F::leaky_relu(x,
|
214 |
+
/// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
|
215 |
+
/// ```
|
216 |
+
using LeakyReLUFuncOptions = LeakyReLUOptions;
|
217 |
+
} // namespace functional
|
218 |
+
|
219 |
+
// ============================================================================
|
220 |
+
|
221 |
+
/// Options for the `Softmax` module.
|
222 |
+
///
|
223 |
+
/// Example:
|
224 |
+
/// ```
|
225 |
+
/// Softmax model(SoftmaxOptions(1));
|
226 |
+
/// ```
|
227 |
+
struct TORCH_API SoftmaxOptions {
|
228 |
+
SoftmaxOptions(int64_t dim);
|
229 |
+
|
230 |
+
/// Dimension along which Softmax will be computed.
|
231 |
+
TORCH_ARG(int64_t, dim);
|
232 |
+
};
|
233 |
+
|
234 |
+
// ============================================================================
|
235 |
+
|
236 |
+
namespace functional {
|
237 |
+
|
238 |
+
/// Options for `torch::nn::functional::softmax`.
|
239 |
+
///
|
240 |
+
/// Example:
|
241 |
+
/// ```
|
242 |
+
/// namespace F = torch::nn::functional;
|
243 |
+
/// F::softmax(input, F::SoftmaxFuncOptions(1));
|
244 |
+
/// ```
|
245 |
+
struct TORCH_API SoftmaxFuncOptions {
|
246 |
+
SoftmaxFuncOptions(int64_t dim);
|
247 |
+
|
248 |
+
/// Dimension along which Softmax will be computed.
|
249 |
+
TORCH_ARG(int64_t, dim);
|
250 |
+
|
251 |
+
/// the desired data type of returned tensor.
|
252 |
+
/// If specified, the input tensor is casted to `dtype` before the operation
|
253 |
+
/// is performed. This is useful for preventing data type overflows. Default:
|
254 |
+
/// None.
|
255 |
+
TORCH_ARG(c10::optional<torch::Dtype>, dtype) = c10::nullopt;
|
256 |
+
};
|
257 |
+
|
258 |
+
} // namespace functional
|
259 |
+
|
260 |
+
// ============================================================================
|
261 |
+
|
262 |
+
/// Options for the `Softmin` module.
|
263 |
+
///
|
264 |
+
/// Example:
|
265 |
+
/// ```
|
266 |
+
/// Softmin model(SoftminOptions(1));
|
267 |
+
/// ```
|
268 |
+
struct TORCH_API SoftminOptions {
|
269 |
+
SoftminOptions(int64_t dim);
|
270 |
+
|
271 |
+
/// Dimension along which Softmin will be computed.
|
272 |
+
TORCH_ARG(int64_t, dim);
|
273 |
+
};
|
274 |
+
|
275 |
+
// ============================================================================
|
276 |
+
|
277 |
+
namespace functional {
|
278 |
+
|
279 |
+
/// Options for `torch::nn::functional::softmin`.
|
280 |
+
///
|
281 |
+
/// Example:
|
282 |
+
/// ```
|
283 |
+
/// namespace F = torch::nn::functional;
|
284 |
+
/// F::softmin(input, F::SoftminFuncOptions(1));
|
285 |
+
/// ```
|
286 |
+
struct TORCH_API SoftminFuncOptions {
|
287 |
+
SoftminFuncOptions(int64_t dim);
|
288 |
+
|
289 |
+
/// Dimension along which Softmin will be computed.
|
290 |
+
TORCH_ARG(int64_t, dim);
|
291 |
+
|
292 |
+
/// the desired data type of returned tensor.
|
293 |
+
/// If specified, the input tensor is casted to `dtype` before the operation
|
294 |
+
/// is performed. This is useful for preventing data type overflows. Default:
|
295 |
+
/// None.
|
296 |
+
TORCH_ARG(c10::optional<torch::Dtype>, dtype) = c10::nullopt;
|
297 |
+
};
|
298 |
+
|
299 |
+
} // namespace functional
|
300 |
+
|
301 |
+
// ============================================================================
|
302 |
+
|
303 |
+
/// Options for the `LogSoftmax` module.
|
304 |
+
///
|
305 |
+
/// Example:
|
306 |
+
/// ```
|
307 |
+
/// LogSoftmax model(LogSoftmaxOptions(1));
|
308 |
+
/// ```
|
309 |
+
struct TORCH_API LogSoftmaxOptions {
|
310 |
+
LogSoftmaxOptions(int64_t dim);
|
311 |
+
|
312 |
+
/// Dimension along which LogSoftmax will be computed.
|
313 |
+
TORCH_ARG(int64_t, dim);
|
314 |
+
};
|
315 |
+
|
316 |
+
// ============================================================================
|
317 |
+
|
318 |
+
namespace functional {
|
319 |
+
|
320 |
+
/// Options for `torch::nn::functional::log_softmax`.
|
321 |
+
///
|
322 |
+
/// Example:
|
323 |
+
/// ```
|
324 |
+
/// namespace F = torch::nn::functional;
|
325 |
+
/// F::log_softmax(input, F::LogSoftmaxFuncOptions(1));
|
326 |
+
/// ```
|
327 |
+
struct TORCH_API LogSoftmaxFuncOptions {
|
328 |
+
LogSoftmaxFuncOptions(int64_t dim);
|
329 |
+
|
330 |
+
/// Dimension along which LogSoftmax will be computed.
|
331 |
+
TORCH_ARG(int64_t, dim);
|
332 |
+
|
333 |
+
/// the desired data type of the returned tensor.
|
334 |
+
/// If specified, the input tensor is cast to `dtype` before the operation
|
335 |
+
/// is performed. This is useful for preventing data type overflows. Default:
|
336 |
+
/// None.
|
337 |
+
TORCH_ARG(c10::optional<torch::Dtype>, dtype) = c10::nullopt;
|
338 |
+
};
|
339 |
+
|
340 |
+
} // namespace functional
|
341 |
+
|
342 |
+
// ============================================================================
|
343 |
+
|
344 |
+
/// Options for the `PReLU` module.
|
345 |
+
///
|
346 |
+
/// Example:
|
347 |
+
/// ```
|
348 |
+
/// PReLU model(PReLUOptions().num_parameters(42));
|
349 |
+
/// ```
|
350 |
+
struct TORCH_API PReLUOptions {
|
351 |
+
/// number of `a` to learn. Although it takes an int as input, only two
|
352 |
+
/// values are legitimate: 1, or the number of channels at input. Default:
|
353 |
+
/// 1
|
354 |
+
TORCH_ARG(int64_t, num_parameters) = 1;
|
355 |
+
|
356 |
+
/// the initial value of `a`. Default: 0.25
|
357 |
+
TORCH_ARG(double, init) = 0.25;
|
358 |
+
};
|
359 |
+
|
360 |
+
// ============================================================================
|
361 |
+
|
362 |
+
/// Options for the `ReLU` module.
|
363 |
+
///
|
364 |
+
/// Example:
|
365 |
+
/// ```
|
366 |
+
/// ReLU model(ReLUOptions().inplace(true));
|
367 |
+
/// ```
|
368 |
+
struct TORCH_API ReLUOptions {
|
369 |
+
/* implicit */ ReLUOptions(bool inplace = false);
|
370 |
+
|
371 |
+
/// can optionally do the operation in-place. Default: False
|
372 |
+
TORCH_ARG(bool, inplace);
|
373 |
+
};
|
374 |
+
|
375 |
+
namespace functional {
|
376 |
+
/// Options for `torch::nn::functional::relu`.
|
377 |
+
///
|
378 |
+
/// See the documentation for `torch::nn::ReLUOptions` class to learn what
|
379 |
+
/// arguments are supported.
|
380 |
+
///
|
381 |
+
/// Example:
|
382 |
+
/// ```
|
383 |
+
/// namespace F = torch::nn::functional;
|
384 |
+
/// F::relu(x, F::ReLUFuncOptions().inplace(true));
|
385 |
+
/// ```
|
386 |
+
using ReLUFuncOptions = ReLUOptions;
|
387 |
+
} // namespace functional
|
388 |
+
|
389 |
+
// ============================================================================
|
390 |
+
|
391 |
+
/// Options for the `ReLU6` module.
|
392 |
+
///
|
393 |
+
/// Example:
|
394 |
+
/// ```
|
395 |
+
/// ReLU6 model(ReLU6Options().inplace(true));
|
396 |
+
/// ```
|
397 |
+
struct TORCH_API ReLU6Options {
|
398 |
+
/* implicit */ ReLU6Options(bool inplace = false);
|
399 |
+
|
400 |
+
/// can optionally do the operation in-place. Default: False
|
401 |
+
TORCH_ARG(bool, inplace);
|
402 |
+
};
|
403 |
+
|
404 |
+
namespace functional {
|
405 |
+
/// Options for `torch::nn::functional::relu6`.
|
406 |
+
///
|
407 |
+
/// See the documentation for `torch::nn::ReLU6Options` class to learn what
|
408 |
+
/// arguments are supported.
|
409 |
+
///
|
410 |
+
/// Example:
|
411 |
+
/// ```
|
412 |
+
/// namespace F = torch::nn::functional;
|
413 |
+
/// F::relu6(x, F::ReLU6FuncOptions().inplace(true));
|
414 |
+
/// ```
|
415 |
+
using ReLU6FuncOptions = ReLU6Options;
|
416 |
+
} // namespace functional
|
417 |
+
|
418 |
+
// ============================================================================
|
419 |
+
|
420 |
+
/// Options for the `RReLU` module.
|
421 |
+
///
|
422 |
+
/// Example:
|
423 |
+
/// ```
|
424 |
+
/// RReLU model(RReLUOptions().lower(0.24).upper(0.42).inplace(true));
|
425 |
+
/// ```
|
426 |
+
struct TORCH_API RReLUOptions {
|
427 |
+
/// lower bound of the uniform distribution. Default: 1/8
|
428 |
+
TORCH_ARG(double, lower) = 1.0 / 8.0;
|
429 |
+
|
430 |
+
/// upper bound of the uniform distribution. Default: 1/3
|
431 |
+
TORCH_ARG(double, upper) = 1.0 / 3.0;
|
432 |
+
|
433 |
+
/// can optionally do the operation in-place. Default: False
|
434 |
+
TORCH_ARG(bool, inplace) = false;
|
435 |
+
};
|
436 |
+
|
437 |
+
// ============================================================================
|
438 |
+
|
439 |
+
namespace functional {
|
440 |
+
|
441 |
+
/// Options for `torch::nn::functional::rrelu`.
|
442 |
+
///
|
443 |
+
/// Example:
|
444 |
+
/// ```
|
445 |
+
/// namespace F = torch::nn::functional;
|
446 |
+
/// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true));
|
447 |
+
/// ```
|
448 |
+
struct TORCH_API RReLUFuncOptions {
|
449 |
+
/// lower bound of the uniform distribution. Default: 1/8
|
450 |
+
TORCH_ARG(double, lower) = 1.0 / 8.0;
|
451 |
+
|
452 |
+
/// upper bound of the uniform distribution. Default: 1/3
|
453 |
+
TORCH_ARG(double, upper) = 1.0 / 3.0;
|
454 |
+
|
455 |
+
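/// whether to apply the training-mode behaviour, i.e. sample the negative
/// slope uniformly from [`lower`, `upper`] rather than using their average.
/// Default: false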
TORCH_ARG(bool, training) = false;
|
456 |
+
|
457 |
+
/// can optionally do the operation in-place. Default: False
|
458 |
+
TORCH_ARG(bool, inplace) = false;
|
459 |
+
};
|
460 |
+
|
461 |
+
} // namespace functional
|
462 |
+
|
463 |
+
// ============================================================================
|
464 |
+
|
465 |
+
/// Options for the `CELU` module.
|
466 |
+
///
|
467 |
+
/// Example:
|
468 |
+
/// ```
|
469 |
+
/// CELU model(CELUOptions().alpha(42.42).inplace(true));
|
470 |
+
/// ```
|
471 |
+
struct TORCH_API CELUOptions {
|
472 |
+
/// The `alpha` value for the CELU formulation. Default: 1.0
|
473 |
+
TORCH_ARG(double, alpha) = 1.0;
|
474 |
+
|
475 |
+
/// can optionally do the operation in-place. Default: False
|
476 |
+
TORCH_ARG(bool, inplace) = false;
|
477 |
+
};
|
478 |
+
|
479 |
+
namespace functional {
|
480 |
+
/// Options for `torch::nn::functional::celu`.
|
481 |
+
///
|
482 |
+
/// See the documentation for `torch::nn::CELUOptions` class to learn what
|
483 |
+
/// arguments are supported.
|
484 |
+
///
|
485 |
+
/// Example:
|
486 |
+
/// ```
|
487 |
+
/// namespace F = torch::nn::functional;
|
488 |
+
/// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
|
489 |
+
/// ```
|
490 |
+
using CELUFuncOptions = CELUOptions;
|
491 |
+
} // namespace functional
|
492 |
+
|
493 |
+
// ============================================================================
|
494 |
+
|
495 |
+
/// Options for the `Softplus` module.
|
496 |
+
///
|
497 |
+
/// Example:
|
498 |
+
/// ```
|
499 |
+
/// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42));
|
500 |
+
/// ```
|
501 |
+
struct TORCH_API SoftplusOptions {
|
502 |
+
/// the `beta` value for the Softplus formulation. Default: 1
|
503 |
+
TORCH_ARG(double, beta) = 1.0;
|
504 |
+
|
505 |
+
/// values above this revert to a linear function. Default: 20
|
506 |
+
TORCH_ARG(double, threshold) = 20.0;
|
507 |
+
};
|
508 |
+
|
509 |
+
namespace functional {
|
510 |
+
/// Options for `torch::nn::functional::softplus`.
|
511 |
+
///
|
512 |
+
/// See the documentation for `torch::nn::SoftplusOptions` class to learn what
|
513 |
+
/// arguments are supported.
|
514 |
+
///
|
515 |
+
/// Example:
|
516 |
+
/// ```
|
517 |
+
/// namespace F = torch::nn::functional;
|
518 |
+
/// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
|
519 |
+
/// ```
|
520 |
+
using SoftplusFuncOptions = SoftplusOptions;
|
521 |
+
} // namespace functional
|
522 |
+
|
523 |
+
// ============================================================================
|
524 |
+
|
525 |
+
/// Options for the `Softshrink` module.
|
526 |
+
///
|
527 |
+
/// Example:
|
528 |
+
/// ```
|
529 |
+
/// Softshrink model(SoftshrinkOptions(42.42));
|
530 |
+
/// ```
|
531 |
+
struct TORCH_API SoftshrinkOptions {
|
532 |
+
/* implicit */ SoftshrinkOptions(double lambda = 0.5);
|
533 |
+
|
534 |
+
/// the `lambda` value for the Softshrink formulation. Default: 0.5
|
535 |
+
TORCH_ARG(double, lambda);
|
536 |
+
};
|
537 |
+
|
538 |
+
namespace functional {
|
539 |
+
/// Options for `torch::nn::functional::softshrink`.
|
540 |
+
///
|
541 |
+
/// See the documentation for `torch::nn::SoftshrinkOptions` class to learn what
|
542 |
+
/// arguments are supported.
|
543 |
+
///
|
544 |
+
/// Example:
|
545 |
+
/// ```
|
546 |
+
/// namespace F = torch::nn::functional;
|
547 |
+
/// F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
|
548 |
+
/// ```
|
549 |
+
using SoftshrinkFuncOptions = SoftshrinkOptions;
|
550 |
+
} // namespace functional
|
551 |
+
|
552 |
+
// ============================================================================
|
553 |
+
|
554 |
+
/// Options for the `Threshold` module.
|
555 |
+
///
|
556 |
+
/// Example:
|
557 |
+
/// ```
|
558 |
+
/// Threshold model(ThresholdOptions(42.42, 24.24).inplace(true));
|
559 |
+
/// ```
|
560 |
+
struct TORCH_API ThresholdOptions {
|
561 |
+
ThresholdOptions(double threshold, double value)
|
562 |
+
: threshold_(threshold), value_(value) {}
|
563 |
+
|
564 |
+
/// The value to threshold at
|
565 |
+
TORCH_ARG(double, threshold);
|
566 |
+
|
567 |
+
/// The value to replace with
|
568 |
+
TORCH_ARG(double, value);
|
569 |
+
|
570 |
+
/// can optionally do the operation in-place. Default: False
|
571 |
+
TORCH_ARG(bool, inplace) = false;
|
572 |
+
};
|
573 |
+
|
574 |
+
namespace functional {
|
575 |
+
/// Options for `torch::nn::functional::threshold`.
|
576 |
+
///
|
577 |
+
/// See the documentation for `torch::nn::ThresholdOptions` class to learn what
|
578 |
+
/// arguments are supported.
|
579 |
+
///
|
580 |
+
/// Example:
|
581 |
+
/// ```
|
582 |
+
/// namespace F = torch::nn::functional;
|
583 |
+
/// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
|
584 |
+
/// ```
|
585 |
+
using ThresholdFuncOptions = ThresholdOptions;
|
586 |
+
} // namespace functional
|
587 |
+
|
588 |
+
// ============================================================================
|
589 |
+
|
590 |
+
namespace functional {
|
591 |
+
|
592 |
+
/// Options for `torch::nn::functional::gumbel_softmax`.
|
593 |
+
///
|
594 |
+
/// Example:
|
595 |
+
/// ```
|
596 |
+
/// namespace F = torch::nn::functional;
|
597 |
+
/// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1));
|
598 |
+
/// ```
|
599 |
+
struct TORCH_API GumbelSoftmaxFuncOptions {
|
600 |
+
/// non-negative scalar temperature
|
601 |
+
TORCH_ARG(double, tau) = 1.0;
|
602 |
+
|
603 |
+
/// returned samples will be discretized as one-hot vectors,
|
604 |
+
/// but will be differentiated as if they were the soft sample in autograd.
|
605 |
+
/// Default: False
|
606 |
+
TORCH_ARG(bool, hard) = false;
|
607 |
+
|
608 |
+
/// dimension along which softmax will be computed. Default: -1
|
609 |
+
TORCH_ARG(int, dim) = -1;
|
610 |
+
};
|
611 |
+
|
612 |
+
} // namespace functional
|
613 |
+
|
614 |
+
// ============================================================================
|
615 |
+
|
616 |
+
/// Options for the `MultiheadAttention` module.
|
617 |
+
///
|
618 |
+
/// Example:
|
619 |
+
/// ```
|
620 |
+
/// MultiheadAttention model(MultiheadAttentionOptions(20, 10).bias(false));
|
621 |
+
/// ```
|
622 |
+
struct TORCH_API MultiheadAttentionOptions {
|
623 |
+
MultiheadAttentionOptions(int64_t embed_dim, int64_t num_heads);
|
624 |
+
|
625 |
+
/// total dimension of the model.
|
626 |
+
TORCH_ARG(int64_t, embed_dim);
|
627 |
+
|
628 |
+
/// parallel attention heads.
|
629 |
+
TORCH_ARG(int64_t, num_heads);
|
630 |
+
|
631 |
+
/// dropout probability applied to attn_output_weights. Default: 0.0.
|
632 |
+
TORCH_ARG(double, dropout) = 0.0;
|
633 |
+
|
634 |
+
/// add bias as module parameter. Default: true.
|
635 |
+
TORCH_ARG(bool, bias) = true;
|
636 |
+
|
637 |
+
/// add bias to the key and value sequences at dim=0.
|
638 |
+
TORCH_ARG(bool, add_bias_kv) = false;
|
639 |
+
|
640 |
+
/// add a new batch of zeros to the key and value sequences at dim=1.
|
641 |
+
TORCH_ARG(bool, add_zero_attn) = false;
|
642 |
+
|
643 |
+
/// total number of features in key. Default: c10::nullopt.
|
644 |
+
TORCH_ARG(int64_t, kdim);
|
645 |
+
|
646 |
+
/// total number of features in value. Default: c10::nullopt.
|
647 |
+
TORCH_ARG(int64_t, vdim);
|
648 |
+
};
|
649 |
+
|
650 |
+
// ============================================================================
|
651 |
+
|
652 |
+
namespace functional {
|
653 |
+
|
654 |
+
/// Options for `torch::nn::functional::multi_head_attention_forward`
|
655 |
+
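///
/// Example (illustrative sketch; assumes the
/// `F::multi_head_attention_forward(query, key, value, options)` overload
/// and pre-existing projection tensors):
/// ```
/// namespace F = torch::nn::functional;
/// auto outputs = F::multi_head_attention_forward(query, key, value,
///     F::MultiheadAttentionForwardFuncOptions(
///         /*embed_dim_to_check=*/20, /*num_heads=*/4,
///         in_proj_weight, in_proj_bias, bias_k, bias_v,
///         /*add_zero_attn=*/false, /*dropout_p=*/0.1,
///         out_proj_weight, out_proj_bias));
/// ```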
struct TORCH_API MultiheadAttentionForwardFuncOptions {
|
656 |
+
MultiheadAttentionForwardFuncOptions(
|
657 |
+
int64_t embed_dim_to_check,
|
658 |
+
int64_t num_heads,
|
659 |
+
Tensor in_proj_weight,
|
660 |
+
Tensor in_proj_bias,
|
661 |
+
Tensor bias_k,
|
662 |
+
Tensor bias_v,
|
663 |
+
bool add_zero_attn,
|
664 |
+
double dropout_p,
|
665 |
+
Tensor out_proj_weight,
|
666 |
+
Tensor out_proj_bias);
|
667 |
+
|
668 |
+
TORCH_ARG(int64_t, embed_dim_to_check);
|
669 |
+
|
670 |
+
TORCH_ARG(int64_t, num_heads);
|
671 |
+
|
672 |
+
TORCH_ARG(Tensor, in_proj_weight);
|
673 |
+
|
674 |
+
TORCH_ARG(Tensor, in_proj_bias);
|
675 |
+
|
676 |
+
TORCH_ARG(Tensor, bias_k);
|
677 |
+
|
678 |
+
TORCH_ARG(Tensor, bias_v);
|
679 |
+
|
680 |
+
TORCH_ARG(bool, add_zero_attn);
|
681 |
+
|
682 |
+
TORCH_ARG(double, dropout_p);
|
683 |
+
|
684 |
+
TORCH_ARG(Tensor, out_proj_weight);
|
685 |
+
|
686 |
+
TORCH_ARG(Tensor, out_proj_bias);
|
687 |
+
|
688 |
+
TORCH_ARG(bool, training) = true;
|
689 |
+
|
690 |
+
TORCH_ARG(Tensor, key_padding_mask) = {};
|
691 |
+
|
692 |
+
TORCH_ARG(bool, need_weights) = true;
|
693 |
+
|
694 |
+
TORCH_ARG(Tensor, attn_mask) = {};
|
695 |
+
|
696 |
+
TORCH_ARG(bool, use_separate_proj_weight) = false;
|
697 |
+
|
698 |
+
TORCH_ARG(Tensor, q_proj_weight) = {};
|
699 |
+
|
700 |
+
TORCH_ARG(Tensor, k_proj_weight) = {};
|
701 |
+
|
702 |
+
TORCH_ARG(Tensor, v_proj_weight) = {};
|
703 |
+
|
704 |
+
TORCH_ARG(Tensor, static_k) = {};
|
705 |
+
|
706 |
+
TORCH_ARG(Tensor, static_v) = {};
|
707 |
+
|
708 |
+
TORCH_ARG(bool, average_attn_weights) = true;
|
709 |
+
};
|
710 |
+
|
711 |
+
} // namespace functional
|
712 |
+
|
713 |
+
} // namespace nn
|
714 |
+
} // namespace torch
|
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h
ADDED
@@ -0,0 +1,41 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/arg.h>
|
4 |
+
#include <torch/csrc/Export.h>
|
5 |
+
#include <torch/types.h>
|
6 |
+
|
7 |
+
namespace torch {
|
8 |
+
namespace nn {
|
9 |
+
|
10 |
+
/// Options for the `AdaptiveLogSoftmaxWithLoss` module.
|
11 |
+
///
|
12 |
+
/// Example:
|
13 |
+
/// ```
|
14 |
+
/// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10,
|
15 |
+
/// {4, 8}).div_value(2.).head_bias(true));
|
16 |
+
/// ```
|
17 |
+
struct TORCH_API AdaptiveLogSoftmaxWithLossOptions {
|
18 |
+
/* implicit */ AdaptiveLogSoftmaxWithLossOptions(
|
19 |
+
int64_t in_features,
|
20 |
+
int64_t n_classes,
|
21 |
+
std::vector<int64_t> cutoffs);
|
22 |
+
|
23 |
+
/// Number of features in the input tensor
|
24 |
+
TORCH_ARG(int64_t, in_features);
|
25 |
+
|
26 |
+
/// Number of classes in the dataset
|
27 |
+
TORCH_ARG(int64_t, n_classes);
|
28 |
+
|
29 |
+
/// Cutoffs used to assign targets to their buckets
|
30 |
+
TORCH_ARG(std::vector<int64_t>, cutoffs);
|
31 |
+
|
32 |
+
/// value used as an exponent to compute sizes of the clusters. Default: 4.0
|
33 |
+
TORCH_ARG(double, div_value) = 4.;
|
34 |
+
|
35 |
+
/// If ``true``, adds a bias term to the 'head' of
|
36 |
+
/// the adaptive softmax. Default: false
|
37 |
+
TORCH_ARG(bool, head_bias) = false;
|
38 |
+
};
|
39 |
+
|
40 |
+
} // namespace nn
|
41 |
+
} // namespace torch
|
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h
ADDED
@@ -0,0 +1,95 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/arg.h>
|
4 |
+
#include <torch/csrc/Export.h>
|
5 |
+
#include <torch/types.h>
|
6 |
+
|
7 |
+
namespace torch {
|
8 |
+
namespace nn {
|
9 |
+
|
10 |
+
/// Options for the `BatchNorm` module.
|
11 |
+
struct TORCH_API BatchNormOptions {
|
12 |
+
/* implicit */ BatchNormOptions(int64_t num_features);
|
13 |
+
|
14 |
+
/// The number of features of the input tensor.
|
15 |
+
/// Changing this parameter after construction __has no effect__.
|
16 |
+
TORCH_ARG(int64_t, num_features);
|
17 |
+
|
18 |
+
/// The epsilon value added for numerical stability.
|
19 |
+
/// Changing this parameter after construction __is effective__.
|
20 |
+
TORCH_ARG(double, eps) = 1e-5;
|
21 |
+
|
22 |
+
/// A momentum multiplier for the mean and variance.
|
23 |
+
/// Changing this parameter after construction __is effective__.
|
24 |
+
TORCH_ARG(c10::optional<double>, momentum) = 0.1;
|
25 |
+
|
26 |
+
/// Whether to learn a scale and bias that are applied in an affine
|
27 |
+
/// transformation on the input.
|
28 |
+
/// Changing this parameter after construction __has no effect__.
|
29 |
+
TORCH_ARG(bool, affine) = true;
|
30 |
+
|
31 |
+
/// Whether to store and update batch statistics (mean and variance) in the
|
32 |
+
/// module.
|
33 |
+
/// Changing this parameter after construction __has no effect__.
|
34 |
+
TORCH_ARG(bool, track_running_stats) = true;
|
35 |
+
};
|
36 |
+
|
37 |
+
/// Options for the `BatchNorm1d` module.
|
38 |
+
///
|
39 |
+
/// Example:
|
40 |
+
/// ```
|
41 |
+
/// BatchNorm1d
|
42 |
+
/// model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
|
43 |
+
/// ```
|
44 |
+
using BatchNorm1dOptions = BatchNormOptions;
|
45 |
+
|
46 |
+
/// Options for the `BatchNorm2d` module.
|
47 |
+
///
|
48 |
+
/// Example:
|
49 |
+
/// ```
|
50 |
+
/// BatchNorm2d
|
51 |
+
/// model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
|
52 |
+
/// ```
|
53 |
+
using BatchNorm2dOptions = BatchNormOptions;
|
54 |
+
|
55 |
+
/// Options for the `BatchNorm3d` module.
|
56 |
+
///
|
57 |
+
/// Example:
|
58 |
+
/// ```
|
59 |
+
/// BatchNorm3d
|
60 |
+
/// model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
|
61 |
+
/// ```
|
62 |
+
using BatchNorm3dOptions = BatchNormOptions;
|
63 |
+
|
64 |
+
// ============================================================================
|
65 |
+
|
66 |
+
namespace functional {
|
67 |
+
|
68 |
+
/// Options for `torch::nn::functional::batch_norm`.
|
69 |
+
///
|
70 |
+
/// Example:
|
71 |
+
/// ```
|
72 |
+
/// namespace F = torch::nn::functional;
|
73 |
+
/// F::batch_norm(input, mean, variance,
|
74 |
+
/// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false));
|
75 |
+
/// ```
|
76 |
+
struct TORCH_API BatchNormFuncOptions {
|
77 |
+
TORCH_ARG(Tensor, weight) = Tensor();
|
78 |
+
|
79 |
+
TORCH_ARG(Tensor, bias) = Tensor();
|
80 |
+
|
81 |
+
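/// whether to apply training-mode batch normalization, i.e. normalize with
/// the current batch statistics and update any running estimates.
/// Default: false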
TORCH_ARG(bool, training) = false;
|
82 |
+
|
83 |
+
/// A momentum multiplier for the mean and variance.
|
84 |
+
/// Changing this parameter after construction __is effective__.
|
85 |
+
TORCH_ARG(c10::optional<double>, momentum) = 0.1;
|
86 |
+
|
87 |
+
/// The epsilon value added for numerical stability.
|
88 |
+
/// Changing this parameter after construction __is effective__.
|
89 |
+
TORCH_ARG(double, eps) = 1e-5;
|
90 |
+
};
|
91 |
+
|
92 |
+
} // namespace functional
|
93 |
+
|
94 |
+
} // namespace nn
|
95 |
+
} // namespace torch
|
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/conv.h
ADDED
@@ -0,0 +1,415 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/arg.h>
|
4 |
+
#include <torch/csrc/Export.h>
|
5 |
+
#include <torch/enum.h>
|
6 |
+
#include <torch/expanding_array.h>
|
7 |
+
#include <torch/types.h>
|
8 |
+
|
9 |
+
namespace torch {
|
10 |
+
namespace nn {
|
11 |
+
|
12 |
+
namespace detail {
|
13 |
+
|
14 |
+
typedef std::variant<
|
15 |
+
enumtype::kZeros,
|
16 |
+
enumtype::kReflect,
|
17 |
+
enumtype::kReplicate,
|
18 |
+
enumtype::kCircular>
|
19 |
+
conv_padding_mode_t;
|
20 |
+
|
21 |
+
template <size_t D>
|
22 |
+
using conv_padding_t =
|
23 |
+
std::variant<ExpandingArray<D>, enumtype::kValid, enumtype::kSame>;
|
24 |
+
|
25 |
+
/// Options for a `D`-dimensional convolution or convolution transpose module.
|
26 |
+
template <size_t D>
|
27 |
+
struct ConvNdOptions {
|
28 |
+
using padding_t = conv_padding_t<D>;
|
29 |
+
ConvNdOptions(
|
30 |
+
int64_t in_channels,
|
31 |
+
int64_t out_channels,
|
32 |
+
ExpandingArray<D> kernel_size)
|
33 |
+
: in_channels_(in_channels),
|
34 |
+
out_channels_(out_channels),
|
35 |
+
kernel_size_(std::move(kernel_size)) {}
|
36 |
+
|
37 |
+
/// The number of channels the input volumes will have.
|
38 |
+
/// Changing this parameter after construction __has no effect__.
|
39 |
+
TORCH_ARG(int64_t, in_channels);
|
40 |
+
|
41 |
+
/// The number of output channels the convolution should produce.
|
42 |
+
/// Changing this parameter after construction __has no effect__.
|
43 |
+
TORCH_ARG(int64_t, out_channels);
|
44 |
+
|
45 |
+
/// The kernel size to use.
|
46 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
47 |
+
/// numbers.
|
48 |
+
/// This parameter __can__ be changed after construction.
|
49 |
+
TORCH_ARG(ExpandingArray<D>, kernel_size);
|
50 |
+
|
51 |
+
/// The stride of the convolution.
|
52 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
53 |
+
/// numbers.
|
54 |
+
/// This parameter __can__ be changed after construction.
|
55 |
+
TORCH_ARG(ExpandingArray<D>, stride) = 1;
|
56 |
+
|
57 |
+
/// The padding to add to the input volumes.
|
58 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
59 |
+
/// numbers.
|
60 |
+
/// This parameter __can__ be changed after construction.
|
61 |
+
TORCH_ARG(padding_t, padding) = 0;
|
62 |
+
|
63 |
+
public:
|
64 |
+
decltype(auto) padding(std::initializer_list<int64_t> il) {
|
65 |
+
return padding(IntArrayRef{il});
|
66 |
+
}
|
67 |
+
|
68 |
+
/// The kernel dilation.
|
69 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
70 |
+
/// numbers.
|
71 |
+
/// This parameter __can__ be changed after construction.
|
72 |
+
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
|
73 |
+
|
74 |
+
/// If true, convolutions will be transpose convolutions (a.k.a.
|
75 |
+
/// deconvolutions).
|
76 |
+
/// Changing this parameter after construction __has no effect__.
|
77 |
+
TORCH_ARG(bool, transposed) = false;
|
78 |
+
|
79 |
+
/// For transpose convolutions, the padding to add to output volumes.
|
80 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
81 |
+
/// numbers.
|
82 |
+
/// This parameter __can__ be changed after construction.
|
83 |
+
TORCH_ARG(ExpandingArray<D>, output_padding) = 0;
|
84 |
+
|
85 |
+
/// The number of convolution groups.
|
86 |
+
/// This parameter __can__ be changed after construction.
|
87 |
+
TORCH_ARG(int64_t, groups) = 1;
|
88 |
+
|
89 |
+
/// Whether to add a bias after individual applications of the kernel.
|
90 |
+
/// Changing this parameter after construction __has no effect__.
|
91 |
+
TORCH_ARG(bool, bias) = true;
|
92 |
+
|
93 |
+
/// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or
|
94 |
+
/// `torch::kCircular`. Default: `torch::kZeros`
|
95 |
+
TORCH_ARG(conv_padding_mode_t, padding_mode) = torch::kZeros;
|
96 |
+
};
|
97 |
+
|
98 |
+
} // namespace detail
|
99 |
+
|
100 |
+
// ============================================================================
|
101 |
+
|
102 |
+
/// Options for a `D`-dimensional convolution module.
|
103 |
+
template <size_t D>
|
104 |
+
struct ConvOptions {
|
105 |
+
using padding_mode_t = detail::conv_padding_mode_t;
|
106 |
+
using padding_t = detail::conv_padding_t<D>;
|
107 |
+
|
108 |
+
ConvOptions(
|
109 |
+
int64_t in_channels,
|
110 |
+
int64_t out_channels,
|
111 |
+
ExpandingArray<D> kernel_size)
|
112 |
+
: in_channels_(in_channels),
|
113 |
+
out_channels_(out_channels),
|
114 |
+
kernel_size_(std::move(kernel_size)) {}
|
115 |
+
|
116 |
+
/// The number of channels the input volumes will have.
|
117 |
+
/// Changing this parameter after construction __has no effect__.
|
118 |
+
TORCH_ARG(int64_t, in_channels);
|
119 |
+
|
120 |
+
/// The number of output channels the convolution should produce.
|
121 |
+
/// Changing this parameter after construction __has no effect__.
|
122 |
+
TORCH_ARG(int64_t, out_channels);
|
123 |
+
|
124 |
+
/// The kernel size to use.
|
125 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
126 |
+
/// numbers.
|
127 |
+
/// This parameter __can__ be changed after construction.
|
128 |
+
TORCH_ARG(ExpandingArray<D>, kernel_size);
|
129 |
+
|
130 |
+
/// The stride of the convolution.
|
131 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
132 |
+
/// numbers.
|
133 |
+
/// This parameter __can__ be changed after construction.
|
134 |
+
TORCH_ARG(ExpandingArray<D>, stride) = 1;
|
135 |
+
|
136 |
+
/// The padding to add to the input volumes.
|
137 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
138 |
+
/// numbers.
|
139 |
+
/// This parameter __can__ be changed after construction.
|
140 |
+
TORCH_ARG(padding_t, padding) = 0;
|
141 |
+
|
142 |
+
public:
|
143 |
+
decltype(auto) padding(std::initializer_list<int64_t> il) {
|
144 |
+
return padding(IntArrayRef{il});
|
145 |
+
}
|
146 |
+
|
147 |
+
/// The kernel dilation.
|
148 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
149 |
+
/// numbers.
|
150 |
+
/// This parameter __can__ be changed after construction.
|
151 |
+
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
|
152 |
+
|
153 |
+
/// The number of convolution groups.
|
154 |
+
/// This parameter __can__ be changed after construction.
|
155 |
+
TORCH_ARG(int64_t, groups) = 1;
|
156 |
+
|
157 |
+
/// Whether to add a bias after individual applications of the kernel.
|
158 |
+
/// Changing this parameter after construction __has no effect__.
|
159 |
+
TORCH_ARG(bool, bias) = true;
|
160 |
+
|
161 |
+
/// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or
|
162 |
+
/// `torch::kCircular`. Default: `torch::kZeros`
|
163 |
+
TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros;
|
164 |
+
};
|
165 |
+
|
166 |
+
/// `ConvOptions` specialized for the `Conv1d` module.
|
167 |
+
///
|
168 |
+
/// Example:
|
169 |
+
/// ```
|
170 |
+
/// Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false));
|
171 |
+
/// ```
|
172 |
+
using Conv1dOptions = ConvOptions<1>;
|
173 |
+
|
174 |
+
/// `ConvOptions` specialized for the `Conv2d` module.
|
175 |
+
///
|
176 |
+
/// Example:
|
177 |
+
/// ```
|
178 |
+
/// Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false));
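/// // padding also accepts an enum or a brace-list (illustrative):
/// Conv2d same_pad(Conv2dOptions(3, 2, 3).padding(torch::kSame));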
|
179 |
+
/// ```
|
180 |
+
using Conv2dOptions = ConvOptions<2>;
|
181 |
+
|
182 |
+
/// `ConvOptions` specialized for the `Conv3d` module.
|
183 |
+
///
|
184 |
+
/// Example:
|
185 |
+
/// ```
|
186 |
+
/// Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false));
|
187 |
+
/// ```
|
188 |
+
using Conv3dOptions = ConvOptions<3>;
|
189 |
+
|
190 |
+
// ============================================================================
|
191 |
+
|
192 |
+
namespace functional {
|
193 |
+
|
194 |
+
/// Options for a `D`-dimensional convolution functional.
|
195 |
+
template <size_t D>
|
196 |
+
struct ConvFuncOptions {
|
197 |
+
using padding_t = torch::nn::detail::conv_padding_t<D>;
|
198 |
+
|
199 |
+
/// optional bias of shape `(out_channels)`. Default: ``None``
|
200 |
+
TORCH_ARG(torch::Tensor, bias) = Tensor();
|
201 |
+
|
202 |
+
/// The stride of the convolving kernel.
|
203 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
204 |
+
/// numbers.
|
205 |
+
TORCH_ARG(ExpandingArray<D>, stride) = 1;
|
206 |
+
|
207 |
+
/// Implicit paddings on both sides of the input.
|
208 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
209 |
+
/// numbers.
|
210 |
+
TORCH_ARG(padding_t, padding) = 0;
|
211 |
+
|
212 |
+
public:
|
213 |
+
decltype(auto) padding(std::initializer_list<int64_t> il) {
|
214 |
+
return padding(IntArrayRef{il});
|
215 |
+
}
|
216 |
+
|
217 |
+
/// The spacing between kernel elements.
|
218 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
219 |
+
/// numbers.
|
220 |
+
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
|
221 |
+
|
222 |
+
/// Split input into groups; `in_channels` should be divisible by
|
223 |
+
/// the number of groups.
|
224 |
+
TORCH_ARG(int64_t, groups) = 1;
|
225 |
+
};
|
226 |
+
|
227 |
+
/// `ConvFuncOptions` specialized for `torch::nn::functional::conv1d`.
|
228 |
+
///
|
229 |
+
/// Example:
|
230 |
+
/// ```
|
231 |
+
/// namespace F = torch::nn::functional;
|
232 |
+
/// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
|
233 |
+
/// ```
|
234 |
+
using Conv1dFuncOptions = ConvFuncOptions<1>;
|
235 |
+
|
236 |
+
/// `ConvFuncOptions` specialized for `torch::nn::functional::conv2d`.
|
237 |
+
///
|
238 |
+
/// Example:
|
239 |
+
/// ```
|
240 |
+
/// namespace F = torch::nn::functional;
|
241 |
+
/// F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1));
|
242 |
+
/// ```
|
243 |
+
using Conv2dFuncOptions = ConvFuncOptions<2>;
|
244 |
+
|
245 |
+
/// `ConvFuncOptions` specialized for `torch::nn::functional::conv3d`.
|
246 |
+
///
|
247 |
+
/// Example:
|
248 |
+
/// ```
|
249 |
+
/// namespace F = torch::nn::functional;
|
250 |
+
/// F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1));
|
251 |
+
/// ```
|
252 |
+
using Conv3dFuncOptions = ConvFuncOptions<3>;
|
253 |
+
|
254 |
+
} // namespace functional
|
255 |
+
|
256 |
+
// ============================================================================
|
257 |
+
|
258 |
+
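/// Options for a `D`-dimensional convolution transpose module.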
template <size_t D>
|
259 |
+
struct ConvTransposeOptions {
|
260 |
+
using padding_mode_t = detail::conv_padding_mode_t;
|
261 |
+
|
262 |
+
ConvTransposeOptions(
|
263 |
+
int64_t in_channels,
|
264 |
+
int64_t out_channels,
|
265 |
+
ExpandingArray<D> kernel_size)
|
266 |
+
: in_channels_(in_channels),
|
267 |
+
out_channels_(out_channels),
|
268 |
+
kernel_size_(std::move(kernel_size)) {}
|
269 |
+
|
270 |
+
/// The number of channels the input volumes will have.
|
271 |
+
/// Changing this parameter after construction __has no effect__.
|
272 |
+
TORCH_ARG(int64_t, in_channels);
|
273 |
+
|
274 |
+
/// The number of output channels the convolution should produce.
|
275 |
+
/// Changing this parameter after construction __has no effect__.
|
276 |
+
TORCH_ARG(int64_t, out_channels);
|
277 |
+
|
278 |
+
/// The kernel size to use.
|
279 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
280 |
+
/// numbers.
|
281 |
+
/// This parameter __can__ be changed after construction.
|
282 |
+
TORCH_ARG(ExpandingArray<D>, kernel_size);
|
283 |
+
|
284 |
+
/// The stride of the convolution.
|
285 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
286 |
+
/// numbers.
|
287 |
+
/// This parameter __can__ be changed after construction.
|
288 |
+
TORCH_ARG(ExpandingArray<D>, stride) = 1;
|
289 |
+
|
290 |
+
/// The padding to add to the input volumes.
|
291 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
292 |
+
/// numbers.
|
293 |
+
/// This parameter __can__ be changed after construction.
|
294 |
+
TORCH_ARG(ExpandingArray<D>, padding) = 0;
|
295 |
+
|
296 |
+
/// For transpose convolutions, the padding to add to output volumes.
|
297 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
298 |
+
/// numbers.
|
299 |
+
/// This parameter __can__ be changed after construction.
|
300 |
+
TORCH_ARG(ExpandingArray<D>, output_padding) = 0;
|
301 |
+
|
302 |
+
/// The number of convolution groups.
|
303 |
+
/// This parameter __can__ be changed after construction.
|
304 |
+
TORCH_ARG(int64_t, groups) = 1;
|
305 |
+
|
306 |
+
/// Whether to add a bias after individual applications of the kernel.
|
307 |
+
/// Changing this parameter after construction __has no effect__.
|
308 |
+
TORCH_ARG(bool, bias) = true;
|
309 |
+
|
310 |
+
/// The kernel dilation.
|
311 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
312 |
+
/// numbers.
|
313 |
+
/// This parameter __can__ be changed after construction.
|
314 |
+
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
|
315 |
+
|
316 |
+
/// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or
|
317 |
+
/// `torch::kCircular`. Default: `torch::kZeros`
|
318 |
+
TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros;
|
319 |
+
};
|
320 |
+
|
321 |
+
/// `ConvTransposeOptions` specialized for the `ConvTranspose1d` module.
|
322 |
+
///
|
323 |
+
/// Example:
|
324 |
+
/// ```
|
325 |
+
/// ConvTranspose1d model(ConvTranspose1dOptions(3, 2,
|
326 |
+
/// 3).stride(1).bias(false));
|
327 |
+
/// ```
|
328 |
+
using ConvTranspose1dOptions = ConvTransposeOptions<1>;
|
329 |
+
|
330 |
+
/// `ConvTransposeOptions` specialized for the `ConvTranspose2d` module.
|
331 |
+
///
|
332 |
+
/// Example:
|
333 |
+
/// ```
|
334 |
+
/// ConvTranspose2d model(ConvTranspose2dOptions(3, 2,
|
335 |
+
/// 3).stride(1).bias(false));
|
336 |
+
/// ```
|
337 |
+
using ConvTranspose2dOptions = ConvTransposeOptions<2>;
|
338 |
+
|
339 |
+
/// `ConvTransposeOptions` specialized for the `ConvTranspose3d` module.
|
340 |
+
///
|
341 |
+
/// Example:
|
342 |
+
/// ```
|
343 |
+
/// ConvTranspose3d model(ConvTranspose3dOptions(2, 2,
|
344 |
+
/// 2).stride(1).bias(false));
|
345 |
+
/// ```
|
346 |
+
using ConvTranspose3dOptions = ConvTransposeOptions<3>;
|
347 |
+
|
348 |
+
// ============================================================================
|
349 |
+
|
350 |
+
namespace functional {
|
351 |
+
|
352 |
+
/// Options for a `D`-dimensional convolution transpose functional.
|
353 |
+
template <size_t D>
|
354 |
+
struct ConvTransposeFuncOptions {
|
355 |
+
/// optional bias of shape `(out_channels)`. Default: ``None``
|
356 |
+
TORCH_ARG(torch::Tensor, bias) = Tensor();
|
357 |
+
|
358 |
+
/// The stride of the convolving kernel.
|
359 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
360 |
+
/// numbers.
|
361 |
+
TORCH_ARG(ExpandingArray<D>, stride) = 1;
|
362 |
+
|
363 |
+
/// Implicit paddings on both sides of the input.
|
364 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
365 |
+
/// numbers.
|
366 |
+
TORCH_ARG(ExpandingArray<D>, padding) = 0;
|
367 |
+
|
368 |
+
/// Additional size added to one side of each dimension in the output shape.
|
369 |
+
/// Default: 0
|
370 |
+
TORCH_ARG(ExpandingArray<D>, output_padding) = 0;
|
371 |
+
|
372 |
+
/// Split input into groups; `in_channels` should be divisible by
|
373 |
+
/// the number of groups.
|
374 |
+
TORCH_ARG(int64_t, groups) = 1;
|
375 |
+
|
376 |
+
/// The spacing between kernel elements.
|
377 |
+
/// For a `D`-dim convolution, must be a single number or a list of `D`
|
378 |
+
/// numbers.
|
379 |
+
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
|
380 |
+
};
|
381 |
+
|
382 |
+
/// `ConvTransposeFuncOptions` specialized for
|
383 |
+
/// `torch::nn::functional::conv_transpose1d`.
|
384 |
+
///
|
385 |
+
/// Example:
|
386 |
+
/// ```
|
387 |
+
/// namespace F = torch::nn::functional;
|
388 |
+
/// F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1));
|
389 |
+
/// ```
|
390 |
+
using ConvTranspose1dFuncOptions = ConvTransposeFuncOptions<1>;
|
391 |
+
|
392 |
+
/// `ConvTransposeFuncOptions` specialized for
|
393 |
+
/// `torch::nn::functional::conv_transpose2d`.
|
394 |
+
///
|
395 |
+
/// Example:
|
396 |
+
/// ```
|
397 |
+
/// namespace F = torch::nn::functional;
|
398 |
+
/// F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1));
|
399 |
+
/// ```
|
400 |
+
using ConvTranspose2dFuncOptions = ConvTransposeFuncOptions<2>;
|
401 |
+
|
402 |
+
/// `ConvTransposeFuncOptions` specialized for
|
403 |
+
/// `torch::nn::functional::conv_transpose3d`.
|
404 |
+
///
|
405 |
+
/// Example:
|
406 |
+
/// ```
|
407 |
+
/// namespace F = torch::nn::functional;
|
408 |
+
/// F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1));
|
409 |
+
/// ```
|
410 |
+
using ConvTranspose3dFuncOptions = ConvTransposeFuncOptions<3>;
|
411 |
+
|
412 |
+
} // namespace functional
|
413 |
+
|
414 |
+
} // namespace nn
|
415 |
+
} // namespace torch
|
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h
ADDED
@@ -0,0 +1,71 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/arg.h>
|
4 |
+
#include <torch/csrc/Export.h>
|
5 |
+
#include <torch/types.h>
|
6 |
+
|
7 |
+
namespace torch {
|
8 |
+
namespace nn {
|
9 |
+
|
10 |
+
/// Options for the `CosineSimilarity` module.
|
11 |
+
///
|
12 |
+
/// Example:
|
13 |
+
/// ```
|
14 |
+
/// CosineSimilarity model(CosineSimilarityOptions().dim(0).eps(0.5));
|
15 |
+
/// ```
|
16 |
+
struct TORCH_API CosineSimilarityOptions {
|
17 |
+
/// Dimension where cosine similarity is computed. Default: 1
|
18 |
+
TORCH_ARG(int64_t, dim) = 1;
|
19 |
+
/// Small value to avoid division by zero. Default: 1e-8
|
20 |
+
TORCH_ARG(double, eps) = 1e-8;
|
21 |
+
};
|
22 |
+
|
23 |
+
namespace functional {
|
24 |
+
/// Options for `torch::nn::functional::cosine_similarity`.
|
25 |
+
///
|
26 |
+
/// See the documentation for `torch::nn::CosineSimilarityOptions` class to
|
27 |
+
/// learn what arguments are supported.
|
28 |
+
///
|
29 |
+
/// Example:
|
30 |
+
/// ```
|
31 |
+
/// namespace F = torch::nn::functional;
|
32 |
+
/// F::cosine_similarity(input1, input2,
|
33 |
+
/// F::CosineSimilarityFuncOptions().dim(1));
|
34 |
+
/// ```
|
35 |
+
using CosineSimilarityFuncOptions = CosineSimilarityOptions;
|
36 |
+
} // namespace functional
|
37 |
+
|
38 |
+
// ============================================================================
|
39 |
+
|
40 |
+
/// Options for the `PairwiseDistance` module.
|
41 |
+
///
|
42 |
+
/// Example:
|
43 |
+
/// ```
|
44 |
+
/// PairwiseDistance
|
45 |
+
/// model(PairwiseDistanceOptions().p(3).eps(0.5).keepdim(true));
|
46 |
+
/// ```
|
47 |
+
struct TORCH_API PairwiseDistanceOptions {
|
48 |
+
/// The norm degree. Default: 2
|
49 |
+
TORCH_ARG(double, p) = 2.0;
|
50 |
+
/// Small value to avoid division by zero. Default: 1e-6
|
51 |
+
TORCH_ARG(double, eps) = 1e-6;
|
52 |
+
/// Determines whether or not to keep the vector dimension. Default: false
|
53 |
+
TORCH_ARG(bool, keepdim) = false;
|
54 |
+
};
|
55 |
+
|
56 |
+
namespace functional {
|
57 |
+
/// Options for `torch::nn::functional::pairwise_distance`.
|
58 |
+
///
|
59 |
+
/// See the documentation for `torch::nn::PairwiseDistanceOptions` class to
|
60 |
+
/// learn what arguments are supported.
|
61 |
+
///
|
62 |
+
/// Example:
|
63 |
+
/// ```
|
64 |
+
/// namespace F = torch::nn::functional;
|
65 |
+
/// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1));
|
66 |
+
/// ```
|
67 |
+
using PairwiseDistanceFuncOptions = PairwiseDistanceOptions;
|
68 |
+
} // namespace functional
|
69 |
+
|
70 |
+
} // namespace nn
|
71 |
+
} // namespace torch
|
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h
ADDED
@@ -0,0 +1,130 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/arg.h>
|
4 |
+
#include <torch/csrc/Export.h>
|
5 |
+
#include <torch/types.h>
|
6 |
+
|
7 |
+
namespace torch {
|
8 |
+
namespace nn {
|
9 |
+
|
10 |
+
/// Options for the `Dropout` module.
|
11 |
+
///
|
12 |
+
/// Example:
|
13 |
+
/// ```
|
14 |
+
/// Dropout model(DropoutOptions().p(0.42).inplace(true));
|
15 |
+
/// ```
|
16 |
+
struct TORCH_API DropoutOptions {
|
17 |
+
/* implicit */ DropoutOptions(double p = 0.5);
|
18 |
+
|
19 |
+
/// The probability of an element to be zeroed. Default: 0.5
|
20 |
+
TORCH_ARG(double, p) = 0.5;
|
21 |
+
|
22 |
+
/// can optionally do the operation in-place. Default: False
|
23 |
+
TORCH_ARG(bool, inplace) = false;
|
24 |
+
};
|
25 |
+
|
26 |
+
/// Options for the `Dropout2d` module.
|
27 |
+
///
|
28 |
+
/// Example:
|
29 |
+
/// ```
|
30 |
+
/// Dropout2d model(Dropout2dOptions().p(0.42).inplace(true));
|
31 |
+
/// ```
|
32 |
+
using Dropout2dOptions = DropoutOptions;
|
33 |
+
|
34 |
+
/// Options for the `Dropout3d` module.
|
35 |
+
///
|
36 |
+
/// Example:
|
37 |
+
/// ```
|
38 |
+
/// Dropout3d model(Dropout3dOptions().p(0.42).inplace(true));
|
39 |
+
/// ```
|
40 |
+
using Dropout3dOptions = DropoutOptions;
|
41 |
+
|
42 |
+
/// Options for the `AlphaDropout` module.
|
43 |
+
///
|
44 |
+
/// Example:
|
45 |
+
/// ```
|
46 |
+
/// AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true));
|
47 |
+
/// ```
|
48 |
+
using AlphaDropoutOptions = DropoutOptions;
|
49 |
+
|
50 |
+
/// Options for the `FeatureAlphaDropout` module.
|
51 |
+
///
|
52 |
+
/// Example:
|
53 |
+
/// ```
|
54 |
+
/// FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true));
|
55 |
+
/// ```
|
56 |
+
using FeatureAlphaDropoutOptions = DropoutOptions;
|
57 |
+
|
58 |
+
namespace functional {
|
59 |
+
|
60 |
+
/// Options for `torch::nn::functional::dropout`.
|
61 |
+
///
|
62 |
+
/// Example:
|
63 |
+
/// ```
|
64 |
+
/// namespace F = torch::nn::functional;
|
65 |
+
/// F::dropout(input, F::DropoutFuncOptions().p(0.5));
|
66 |
+
/// ```
|
67 |
+
struct TORCH_API DropoutFuncOptions {
|
68 |
+
/// The probability of an element to be zeroed. Default: 0.5
|
69 |
+
TORCH_ARG(double, p) = 0.5;
|
70 |
+
|
71 |
+
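/// apply dropout only if ``true`` (training mode); otherwise the input is
/// returned unchanged. Default: true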
TORCH_ARG(bool, training) = true;
|
72 |
+
|
73 |
+
/// can optionally do the operation in-place. Default: False
|
74 |
+
TORCH_ARG(bool, inplace) = false;
|
75 |
+
};
|
76 |
+
|
77 |
+
/// Options for `torch::nn::functional::dropout2d`.
|
78 |
+
///
|
79 |
+
/// Example:
|
80 |
+
/// ```
|
81 |
+
/// namespace F = torch::nn::functional;
|
82 |
+
/// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5));
|
83 |
+
/// ```
|
84 |
+
using Dropout2dFuncOptions = DropoutFuncOptions;
|
85 |
+
|
86 |
+
/// Options for `torch::nn::functional::dropout3d`.
|
87 |
+
///
|
88 |
+
/// Example:
|
89 |
+
/// ```
|
90 |
+
/// namespace F = torch::nn::functional;
|
91 |
+
/// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5));
|
92 |
+
/// ```
|
93 |
+
using Dropout3dFuncOptions = DropoutFuncOptions;
|
94 |
+
|
95 |
+
/// Options for `torch::nn::functional::alpha_dropout`.
|
96 |
+
///
|
97 |
+
/// Example:
|
98 |
+
/// ```
|
99 |
+
/// namespace F = torch::nn::functional;
|
100 |
+
/// F::alpha_dropout(input,
|
101 |
+
/// F::AlphaDropoutFuncOptions().p(0.5).training(false));
|
102 |
+
/// ```
|
103 |
+
struct TORCH_API AlphaDropoutFuncOptions {
|
104 |
+
TORCH_ARG(double, p) = 0.5;
|
105 |
+
|
106 |
+
TORCH_ARG(bool, training) = false;
|
107 |
+
|
108 |
+
TORCH_ARG(bool, inplace) = false;
|
109 |
+
};
|
110 |
+
|
111 |
+
/// Options for `torch::nn::functional::feature_alpha_dropout`.
|
112 |
+
///
|
113 |
+
/// Example:
|
114 |
+
/// ```
|
115 |
+
/// namespace F = torch::nn::functional;
|
116 |
+
/// F::feature_alpha_dropout(input,
|
117 |
+
/// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false));
|
118 |
+
/// ```
|
119 |
+
struct TORCH_API FeatureAlphaDropoutFuncOptions {
|
120 |
+
TORCH_ARG(double, p) = 0.5;
|
121 |
+
|
122 |
+
TORCH_ARG(bool, training) = false;
|
123 |
+
|
124 |
+
TORCH_ARG(bool, inplace) = false;
|
125 |
+
};
|
126 |
+
|
127 |
+
} // namespace functional
|
128 |
+
|
129 |
+
} // namespace nn
|
130 |
+
} // namespace torch
|
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h
ADDED
@@ -0,0 +1,242 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/arg.h>
|
4 |
+
#include <torch/csrc/Export.h>
|
5 |
+
#include <torch/enum.h>
|
6 |
+
#include <torch/types.h>
|
7 |
+
|
8 |
+
namespace torch {
|
9 |
+
namespace nn {
|
10 |
+
|
11 |
+
/// Options for the `Embedding` module.
|
12 |
+
///
|
13 |
+
/// Example:
|
14 |
+
/// ```
|
15 |
+
/// Embedding model(EmbeddingOptions(10,
|
16 |
+
/// 2).padding_idx(3).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true));
|
17 |
+
/// ```
|
18 |
+
struct TORCH_API EmbeddingOptions {
|
19 |
+
EmbeddingOptions(int64_t num_embeddings, int64_t embedding_dim);
|
20 |
+
|
21 |
+
/// The size of the dictionary of embeddings.
|
22 |
+
TORCH_ARG(int64_t, num_embeddings);
|
23 |
+
/// The size of each embedding vector.
|
24 |
+
TORCH_ARG(int64_t, embedding_dim);
|
25 |
+
/// If specified, the entries at `padding_idx` do not contribute to the
|
26 |
+
/// gradient; therefore, the embedding vector at `padding_idx` is not updated
|
27 |
+
/// during training, i.e. it remains as a fixed "pad". For a newly constructed
|
28 |
+
/// Embedding, the embedding vector at `padding_idx` will default to all
|
29 |
+
/// zeros, but can be updated to another value to be used as the padding
|
30 |
+
/// vector.
|
31 |
+
TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
|
32 |
+
/// If given, each embedding vector with norm larger than `max_norm` is
|
33 |
+
/// renormalized to have norm `max_norm`.
|
34 |
+
TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
|
35 |
+
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
|
36 |
+
TORCH_ARG(double, norm_type) = 2.;
|
37 |
+
/// If given, this will scale gradients by the inverse of frequency of the
|
38 |
+
/// words in the mini-batch. Default ``false``.
|
39 |
+
TORCH_ARG(bool, scale_grad_by_freq) = false;
|
40 |
+
/// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
|
41 |
+
TORCH_ARG(bool, sparse) = false;
|
42 |
+
/// The learnable weights of the module of shape (num_embeddings,
|
43 |
+
/// embedding_dim)
|
44 |
+
TORCH_ARG(torch::Tensor, _weight) = Tensor();
|
45 |
+
};
|
46 |
+
|
47 |
+
// ============================================================================
|
48 |
+
|
49 |
+
/// Options for the `Embedding::from_pretrained` function.
|
50 |
+
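///
/// Example (illustrative sketch; assumes the `Embedding::from_pretrained`
/// factory of the C++ frontend):
/// ```
/// auto weight = torch::tensor({{1.0, 2.3, 3.0}, {4.0, 5.1, 6.3}});
/// Embedding embedding = Embedding::from_pretrained(weight,
///     EmbeddingFromPretrainedOptions().freeze(true).padding_idx(0));
/// ```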
struct TORCH_API EmbeddingFromPretrainedOptions {
|
51 |
+
/// If ``true``, the tensor does not get updated in the learning process.
|
52 |
+
/// Equivalent to ``embedding.weight.requires_grad_(false)``. Default:
|
53 |
+
/// ``true``
|
54 |
+
TORCH_ARG(bool, freeze) = true;
|
55 |
+
/// If specified, the entries at `padding_idx` do not contribute to the
|
56 |
+
/// gradient; therefore, the embedding vector at `padding_idx` is not updated
|
57 |
+
/// during training, i.e. it remains as a fixed "pad".
|
58 |
+
TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
|
59 |
+
/// If given, each embedding vector with norm larger than `max_norm` is
|
60 |
+
/// renormalized to have norm `max_norm`.
|
61 |
+
TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
|
62 |
+
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
|
63 |
+
TORCH_ARG(double, norm_type) = 2.;
|
64 |
+
/// If given, this will scale gradients by the inverse of frequency of the
|
65 |
+
/// words in the mini-batch. Default ``false``.
|
66 |
+
TORCH_ARG(bool, scale_grad_by_freq) = false;
|
67 |
+
/// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
|
68 |
+
TORCH_ARG(bool, sparse) = false;
|
69 |
+
};
|
70 |
+
|
71 |
+
// ============================================================================
|
72 |
+
|
73 |
+
namespace functional {
|
74 |
+
|
75 |
+
/// Options for `torch::nn::functional::embedding`.
|
76 |
+
///
|
77 |
+
/// Example:
|
78 |
+
/// ```
|
79 |
+
/// namespace F = torch::nn::functional;
|
80 |
+
/// F::embedding(input, weight,
|
81 |
+
/// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true));
|
82 |
+
/// ```
|
83 |
+
struct TORCH_API EmbeddingFuncOptions {
|
84 |
+
/// If specified, the entries at `padding_idx` do not contribute to the
|
85 |
+
/// gradient; therefore, the embedding vector at `padding_idx` is not updated
|
86 |
+
/// during training, i.e. it remains as a fixed "pad".
|
87 |
+
TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
|
88 |
+
/// If given, each embedding vector with norm larger than `max_norm` is
|
89 |
+
/// renormalized to have norm `max_norm`.
|
90 |
+
TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
|
91 |
+
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
|
92 |
+
TORCH_ARG(double, norm_type) = 2.;
|
93 |
+
  /// If given, this will scale gradients by the inverse of frequency of the
  /// words in the mini-batch. Default ``false``.
  TORCH_ARG(bool, scale_grad_by_freq) = false;
  /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
  TORCH_ARG(bool, sparse) = false;
};

} // namespace functional

// ============================================================================

typedef std::variant<enumtype::kSum, enumtype::kMean, enumtype::kMax>
    EmbeddingBagMode;

/// Options for the `EmbeddingBag` module.
///
/// Example:
/// ```
/// EmbeddingBag model(EmbeddingBagOptions(10,
/// 2).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true).mode(torch::kSum));
/// ```
struct TORCH_API EmbeddingBagOptions {
  EmbeddingBagOptions(int64_t num_embeddings, int64_t embedding_dim);

  /// The size of the dictionary of embeddings.
  TORCH_ARG(int64_t, num_embeddings);
  /// The size of each embedding vector.
  TORCH_ARG(int64_t, embedding_dim);
  /// If given, each embedding vector with norm larger than `max_norm` is
  /// renormalized to have norm `max_norm`.
  TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
  /// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
  TORCH_ARG(double, norm_type) = 2.;
  /// If given, this will scale gradients by the inverse of frequency of the
  /// words in the mini-batch. Default ``false``. Note: this option is not
  /// supported when ``mode="kMax"``.
  TORCH_ARG(bool, scale_grad_by_freq) = false;
  /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the
  /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights`
  /// into consideration. ``"kMean"`` computes the average of the values in the
  /// bag, ``"kMax"`` computes the max value over each bag.
  TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean;
  /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
  /// Note: this option is not supported when ``mode="kMax"``.
  TORCH_ARG(bool, sparse) = false;
  /// The learnable weights of the module of shape (num_embeddings,
  /// embedding_dim)
  TORCH_ARG(torch::Tensor, _weight) = Tensor();
  /// If ``true``, `offsets` has one additional element, where the last element
  /// is equivalent to the size of `indices`. This matches the CSR format.
  TORCH_ARG(bool, include_last_offset) = false;
  /// If specified, the entries at `padding_idx` do not contribute to the
  /// gradient; therefore, the embedding vector at padding_idx is not updated
  /// during training, i.e. it remains as a fixed "pad". For a newly constructed
  /// EmbeddingBag, the embedding vector at `padding_idx` will default to all
  /// zeros, but can be updated to another value to be used as the padding
  /// vector. Note that the embedding vector at `padding_idx` is excluded from
  /// the reduction.
  TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
};

// ============================================================================

/// Options for the `EmbeddingBag::from_pretrained` function.
struct TORCH_API EmbeddingBagFromPretrainedOptions {
  /// If ``true``, the tensor does not get updated in the learning process.
  /// Equivalent to ``embeddingbag.weight.requires_grad_(false)``. Default:
  /// ``true``
  TORCH_ARG(bool, freeze) = true;
  /// If given, each embedding vector with norm larger than `max_norm` is
  /// renormalized to have norm `max_norm`.
  TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
  /// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
  TORCH_ARG(double, norm_type) = 2.;
  /// If given, this will scale gradients by the inverse of frequency of the
  /// words in the mini-batch. Default ``false``. Note: this option is not
  /// supported when ``mode="kMax"``.
  TORCH_ARG(bool, scale_grad_by_freq) = false;
  /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the
  /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights`
  /// into consideration. ``"kMean"`` computes the average of the values in the
  /// bag, ``"kMax"`` computes the max value over each bag.
  TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean;
  /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
  /// Note: this option is not supported when ``mode="kMax"``.
  TORCH_ARG(bool, sparse) = false;
  /// If ``true``, `offsets` has one additional element, where the last element
  /// is equivalent to the size of `indices`. This matches the CSR format. Note:
  /// this option is currently only supported when ``mode="sum"``.
  TORCH_ARG(bool, include_last_offset) = false;
  /// If specified, the entries at `padding_idx` do not contribute to the
  /// gradient; therefore, the embedding vector at padding_idx is not updated
  /// during training, i.e. it remains as a fixed "pad". Note that the embedding
  /// vector at `padding_idx` is excluded from the reduction.
  TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
};

// ============================================================================

namespace functional {

/// Options for `torch::nn::functional::embedding_bag`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::embedding_bag(input, weight,
/// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));
/// ```
struct TORCH_API EmbeddingBagFuncOptions {
  /// Only used when `input` is 1D. `offsets` determines
  /// the starting index position of each bag (sequence) in `input`.
  TORCH_ARG(torch::Tensor, offsets) = Tensor();
  /// If given, each embedding vector with norm larger than `max_norm` is
  /// renormalized to have norm `max_norm`.
  TORCH_ARG(c10::optional<double>, max_norm) = c10::nullopt;
  /// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
  TORCH_ARG(double, norm_type) = 2.;
  /// If given, this will scale gradients by the inverse of frequency of the
  /// words in the mini-batch. Default ``false``. Note: this option is not
  /// supported when ``mode="kMax"``.
  TORCH_ARG(bool, scale_grad_by_freq) = false;
  /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the
  /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights`
  /// into consideration. ``"kMean"`` computes the average of the values in the
  /// bag, ``"kMax"`` computes the max value over each bag.
  TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean;
  /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
  /// Note: this option is not supported when ``mode="kMax"``.
  TORCH_ARG(bool, sparse) = false;
  /// a tensor of float / double weights, or None to indicate all weights should
  /// be taken to be 1. If specified, `per_sample_weights` must have exactly the
  /// same shape as input and is treated as having the same `offsets`, if those
  /// are not None.
  TORCH_ARG(torch::Tensor, per_sample_weights) = Tensor();
  /// If ``true``, `offsets` has one additional element, where the last element
  /// is equivalent to the size of `indices`. This matches the CSR format. Note:
  /// this option is currently only supported when ``mode="sum"``.
  TORCH_ARG(bool, include_last_offset) = false;
  /// If specified, the entries at `padding_idx` do not contribute to the
  /// gradient; therefore, the embedding vector at padding_idx is not updated
  /// during training, i.e. it remains as a fixed "pad". Note that the embedding
  /// vector at `padding_idx` is excluded from the reduction.
  TORCH_ARG(c10::optional<int64_t>, padding_idx) = c10::nullopt;
};

} // namespace functional

} // namespace nn
} // namespace torch
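The options above are consumed by the `EmbeddingBag` module and by `torch::nn::functional::embedding_bag`. As a hedged orientation only, here is a minimal usage sketch; the tensor values, sizes, and the reuse of the module's `weight` in the functional call are illustrative assumptions, not part of the header.

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;

  // Module form: a 10-entry dictionary of 3-dim vectors, reduced by summation.
  torch::nn::EmbeddingBag bag(
      torch::nn::EmbeddingBagOptions(10, 3).mode(torch::kSum));

  // 1-D index input; `offsets` marks where each bag starts (two bags of four).
  auto input = torch::tensor({1, 2, 4, 5, 4, 3, 2, 9}, torch::kLong);
  auto offsets = torch::tensor({0, 4}, torch::kLong);
  auto out = bag(input, offsets);  // shape [2, 3]

  // Functional form, reusing the module's weight matrix.
  auto out_f = F::embedding_bag(
      input,
      bag->weight,
      F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));

  std::cout << out.sizes() << " " << out_f.sizes() << std::endl;
}
```

With `include_last_offset(true)`, `offsets` would carry one extra trailing element equal to `input.size(0)`, matching the CSR convention described in the options above.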
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h
ADDED
@@ -0,0 +1,99 @@
#pragma once

#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/expanding_array.h>
#include <torch/types.h>

namespace torch {
namespace nn {

/// Options for the `Fold` module.
///
/// Example:
/// ```
/// Fold model(FoldOptions({8, 8}, {3, 3}).dilation(2).padding({2,
/// 1}).stride(2));
/// ```
struct TORCH_API FoldOptions {
  FoldOptions(ExpandingArray<2> output_size, ExpandingArray<2> kernel_size)
      : output_size_(std::move(output_size)),
        kernel_size_(std::move(kernel_size)) {}

  /// describes the spatial shape of the large containing tensor of the sliding
  /// local blocks. It is useful to resolve the ambiguity when multiple input
  /// shapes map to same number of sliding blocks, e.g., with stride > 0.
  TORCH_ARG(ExpandingArray<2>, output_size);

  /// the size of the sliding blocks
  TORCH_ARG(ExpandingArray<2>, kernel_size);

  /// controls the spacing between the kernel points; also known as the à trous
  /// algorithm.
  TORCH_ARG(ExpandingArray<2>, dilation) = 1;

  /// controls the amount of implicit zero-paddings on both sides for padding
  /// number of points for each dimension before reshaping.
  TORCH_ARG(ExpandingArray<2>, padding) = 0;

  /// controls the stride for the sliding blocks.
  TORCH_ARG(ExpandingArray<2>, stride) = 1;
};

namespace functional {
/// Options for `torch::nn::functional::fold`.
///
/// See the documentation for `torch::nn::FoldOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2}));
/// ```
using FoldFuncOptions = FoldOptions;
} // namespace functional

// ============================================================================

/// Options for the `Unfold` module.
///
/// Example:
/// ```
/// Unfold model(UnfoldOptions({2, 4}).dilation(2).padding({2, 1}).stride(2));
/// ```
struct TORCH_API UnfoldOptions {
  UnfoldOptions(ExpandingArray<2> kernel_size)
      : kernel_size_(std::move(kernel_size)) {}

  /// the size of the sliding blocks
  TORCH_ARG(ExpandingArray<2>, kernel_size);

  /// controls the spacing between the kernel points; also known as the à trous
  /// algorithm.
  TORCH_ARG(ExpandingArray<2>, dilation) = 1;

  /// controls the amount of implicit zero-paddings on both sides for padding
  /// number of points for each dimension before reshaping.
  TORCH_ARG(ExpandingArray<2>, padding) = 0;

  /// controls the stride for the sliding blocks.
  TORCH_ARG(ExpandingArray<2>, stride) = 1;
};

namespace functional {
/// Options for `torch::nn::functional::unfold`.
///
/// See the documentation for `torch::nn::UnfoldOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2));
/// ```
using UnfoldFuncOptions = UnfoldOptions;
} // namespace functional

} // namespace nn
} // namespace torch
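`FoldOptions` requires an explicit `output_size` because several input shapes can map to the same set of sliding blocks. A small hedged sketch, assuming non-overlapping 2x2 blocks so that `Fold` exactly inverts `Unfold`; the sizes are illustrative, not taken from the header:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // Extract non-overlapping 2x2 blocks from a [1, 1, 4, 4] input.
  torch::nn::Unfold unfold(torch::nn::UnfoldOptions({2, 2}).stride(2));
  auto x = torch::arange(16, torch::kFloat).reshape({1, 1, 4, 4});
  auto blocks = unfold(x);  // shape [1, 1 * 2 * 2, 4]

  // Put the blocks back onto the 4x4 grid declared via output_size.
  torch::nn::Fold fold(torch::nn::FoldOptions({4, 4}, {2, 2}).stride(2));
  auto y = fold(blocks);  // shape [1, 1, 4, 4]

  // With non-overlapping blocks the round trip is exact; overlapping
  // blocks would instead be summed where they meet.
  std::cout << torch::allclose(x, y) << std::endl;
}
```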
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h
ADDED
@@ -0,0 +1,89 @@
#pragma once

#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/nn/options/batchnorm.h>
#include <torch/types.h>

namespace torch {
namespace nn {

/// Options for the `InstanceNorm` module.
struct TORCH_API InstanceNormOptions {
  /* implicit */ InstanceNormOptions(int64_t num_features);

  /// The number of features of the input tensor.
  TORCH_ARG(int64_t, num_features);

  /// The epsilon value added for numerical stability.
  TORCH_ARG(double, eps) = 1e-5;

  /// A momentum multiplier for the mean and variance.
  TORCH_ARG(double, momentum) = 0.1;

  /// Whether to learn a scale and bias that are applied in an affine
  /// transformation on the input.
  TORCH_ARG(bool, affine) = false;

  /// Whether to store and update batch statistics (mean and variance) in the
  /// module.
  TORCH_ARG(bool, track_running_stats) = false;
};

/// Options for the `InstanceNorm1d` module.
///
/// Example:
/// ```
/// InstanceNorm1d
/// model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
/// ```
using InstanceNorm1dOptions = InstanceNormOptions;

/// Options for the `InstanceNorm2d` module.
///
/// Example:
/// ```
/// InstanceNorm2d
/// model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
/// ```
using InstanceNorm2dOptions = InstanceNormOptions;

/// Options for the `InstanceNorm3d` module.
///
/// Example:
/// ```
/// InstanceNorm3d
/// model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
/// ```
using InstanceNorm3dOptions = InstanceNormOptions;

namespace functional {

/// Options for `torch::nn::functional::instance_norm`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::instance_norm(input,
/// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5));
/// ```
struct TORCH_API InstanceNormFuncOptions {
  TORCH_ARG(Tensor, running_mean) = Tensor();

  TORCH_ARG(Tensor, running_var) = Tensor();

  TORCH_ARG(Tensor, weight) = Tensor();

  TORCH_ARG(Tensor, bias) = Tensor();

  TORCH_ARG(bool, use_input_stats) = true;

  TORCH_ARG(double, momentum) = 0.1;

  TORCH_ARG(double, eps) = 1e-5;
};

} // namespace functional

} // namespace nn
} // namespace torch
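A hedged sketch of how these options are typically passed to the `InstanceNorm{1,2,3}d` modules and to `F::instance_norm`; the batch and spatial sizes below are illustrative assumptions:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;

  // Module form: 3 channels, learnable affine parameters, running statistics.
  torch::nn::InstanceNorm2d norm(torch::nn::InstanceNorm2dOptions(3)
                                     .eps(1e-5)
                                     .momentum(0.1)
                                     .affine(true)
                                     .track_running_stats(true));
  auto x = torch::randn({4, 3, 8, 8});
  auto y = norm(x);  // same shape, normalized per sample and channel

  // Functional form: statistics are computed from the input itself.
  auto y_f = F::instance_norm(
      x, F::InstanceNormFuncOptions().use_input_stats(true).eps(1e-5));

  std::cout << y.sizes() << " " << y_f.sizes() << std::endl;
}
```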
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h
ADDED
@@ -0,0 +1,95 @@
#pragma once

#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>

namespace torch {
namespace nn {

/// Options for the `Linear` module.
///
/// Example:
/// ```
/// Linear model(LinearOptions(5, 2).bias(false));
/// ```
struct TORCH_API LinearOptions {
  LinearOptions(int64_t in_features, int64_t out_features);
  /// size of each input sample
  TORCH_ARG(int64_t, in_features);

  /// size of each output sample
  TORCH_ARG(int64_t, out_features);

  /// If set to false, the layer will not learn an additive bias. Default: true
  TORCH_ARG(bool, bias) = true;
};

// ============================================================================

/// Options for the `Flatten` module.
///
/// Example:
/// ```
/// Flatten model(FlattenOptions().start_dim(2).end_dim(4));
/// ```
struct TORCH_API FlattenOptions {
  /// first dim to flatten
  TORCH_ARG(int64_t, start_dim) = 1;
  /// last dim to flatten
  TORCH_ARG(int64_t, end_dim) = -1;
};

// ============================================================================

/// Options for the `Unflatten` module.
///
/// Note: If input tensor is named, use dimname and namedshape arguments.
///
/// Example:
/// ```
/// Unflatten unnamed_model(UnflattenOptions(0, {2, 2}));
/// Unflatten named_model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}}));
/// ```
struct TORCH_API UnflattenOptions {
  typedef std::vector<std::pair<std::string, int64_t>> namedshape_t;

  UnflattenOptions(int64_t dim, std::vector<int64_t> sizes);
  UnflattenOptions(const char* dimname, namedshape_t namedshape);
  UnflattenOptions(std::string dimname, namedshape_t namedshape);

  /// dim to unflatten
  TORCH_ARG(int64_t, dim);
  /// name of dim to unflatten, for use with named tensors
  TORCH_ARG(std::string, dimname);
  /// new shape of unflattened dim
  TORCH_ARG(std::vector<int64_t>, sizes);
  /// new shape of unflattened dim with names, for use with named tensors
  TORCH_ARG(namedshape_t, namedshape);
};

// ============================================================================

/// Options for the `Bilinear` module.
///
/// Example:
/// ```
/// Bilinear model(BilinearOptions(3, 2, 4).bias(false));
/// ```
struct TORCH_API BilinearOptions {
  BilinearOptions(
      int64_t in1_features,
      int64_t in2_features,
      int64_t out_features);
  /// The number of features in input 1 (columns of the input1 matrix).
  TORCH_ARG(int64_t, in1_features);
  /// The number of features in input 2 (columns of the input2 matrix).
  TORCH_ARG(int64_t, in2_features);
  /// The number of output features to produce (columns of the output matrix).
  TORCH_ARG(int64_t, out_features);
  /// Whether to learn and add a bias after the bilinear transformation.
  TORCH_ARG(bool, bias) = true;
};

} // namespace nn
} // namespace torch
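A hedged sketch combining the `Linear`, `Unflatten`, and `Flatten` options above; the feature sizes are illustrative assumptions:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  // Bias-free linear layer: 8 input features, 4 output features.
  torch::nn::Linear linear(torch::nn::LinearOptions(8, 4).bias(false));
  auto x = torch::randn({2, 8});
  auto y = linear(x);  // shape [2, 4]

  // Unflatten dim 1 of the [2, 4] output into [2, 2, 2] ...
  torch::nn::Unflatten unflatten(torch::nn::UnflattenOptions(1, {2, 2}));
  auto z = unflatten(y);

  // ... and flatten everything after dim 1 back again.
  torch::nn::Flatten flatten(torch::nn::FlattenOptions().start_dim(1));
  auto w = flatten(z);  // shape [2, 4]

  std::cout << y.sizes() << " " << z.sizes() << " " << w.sizes() << std::endl;
}
```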
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h
ADDED
@@ -0,0 +1,802 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/arg.h>
|
4 |
+
#include <torch/csrc/Export.h>
|
5 |
+
#include <torch/enum.h>
|
6 |
+
#include <torch/types.h>
|
7 |
+
|
8 |
+
namespace torch {
|
9 |
+
namespace nn {
|
10 |
+
|
11 |
+
/// Options for the `L1Loss` module.
|
12 |
+
///
|
13 |
+
/// Example:
|
14 |
+
/// ```
|
15 |
+
/// L1Loss model(L1LossOptions(torch::kNone));
|
16 |
+
/// ```
|
17 |
+
struct TORCH_API L1LossOptions {
|
18 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
19 |
+
reduction_t;
|
20 |
+
|
21 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(L1LossOptions, reduction, kNone, kMean, kSum)
|
22 |
+
|
23 |
+
/// Specifies the reduction to apply to the output.
|
24 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
25 |
+
};
|
26 |
+
|
27 |
+
namespace functional {
|
28 |
+
/// Options for `torch::nn::functional::l1_loss`.
|
29 |
+
///
|
30 |
+
/// See the documentation for `torch::nn::L1LossOptions` class to learn what
|
31 |
+
/// arguments are supported.
|
32 |
+
///
|
33 |
+
/// Example:
|
34 |
+
/// ```
|
35 |
+
/// namespace F = torch::nn::functional;
|
36 |
+
/// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
|
37 |
+
/// ```
|
38 |
+
using L1LossFuncOptions = L1LossOptions;
|
39 |
+
} // namespace functional
|
40 |
+
|
41 |
+
// ============================================================================
|
42 |
+
|
43 |
+
/// Options for the `KLDivLoss` module.
|
44 |
+
///
|
45 |
+
/// Example:
|
46 |
+
/// ```
|
47 |
+
/// KLDivLoss
|
48 |
+
/// model(KLDivLossOptions().reduction(torch::kNone).log_target(false));
|
49 |
+
/// ```
|
50 |
+
struct TORCH_API KLDivLossOptions {
|
51 |
+
typedef std::variant<
|
52 |
+
enumtype::kNone,
|
53 |
+
enumtype::kBatchMean,
|
54 |
+
enumtype::kSum,
|
55 |
+
enumtype::kMean>
|
56 |
+
reduction_t;
|
57 |
+
|
58 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG4(
|
59 |
+
KLDivLossOptions,
|
60 |
+
reduction,
|
61 |
+
kNone,
|
62 |
+
kBatchMean,
|
63 |
+
kSum,
|
64 |
+
kMean)
|
65 |
+
|
66 |
+
/// Specifies the reduction to apply to the output.
|
67 |
+
/// ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. Default: ``'mean'``
|
68 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
69 |
+
|
70 |
+
/// Specifies whether `target` is accepted in the log space. Default: False
|
71 |
+
TORCH_ARG(bool, log_target) = false;
|
72 |
+
};
|
73 |
+
|
74 |
+
namespace functional {
|
75 |
+
/// Options for `torch::nn::functional::kl_div`.
|
76 |
+
///
|
77 |
+
/// See the documentation for `torch::nn::KLDivLossOptions` class to learn what
|
78 |
+
/// arguments are supported.
|
79 |
+
///
|
80 |
+
/// Example:
|
81 |
+
/// ```
|
82 |
+
/// namespace F = torch::nn::functional;
|
83 |
+
/// F::kl_div(input, target,
|
84 |
+
/// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
|
85 |
+
/// ```
|
86 |
+
using KLDivFuncOptions = KLDivLossOptions;
|
87 |
+
} // namespace functional
|
88 |
+
|
89 |
+
// ============================================================================
|
90 |
+
|
91 |
+
/// Options for the `MSELoss` module.
|
92 |
+
///
|
93 |
+
/// Example:
|
94 |
+
/// ```
|
95 |
+
/// MSELoss model(MSELossOptions(torch::kNone));
|
96 |
+
/// ```
|
97 |
+
struct TORCH_API MSELossOptions {
|
98 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
99 |
+
reduction_t;
|
100 |
+
|
101 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(MSELossOptions, reduction, kNone, kMean, kSum)
|
102 |
+
|
103 |
+
/// Specifies the reduction to apply to the output.
|
104 |
+
/// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
|
105 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
106 |
+
};
|
107 |
+
|
108 |
+
namespace functional {
|
109 |
+
/// Options for `torch::nn::functional::mse_loss`.
|
110 |
+
///
|
111 |
+
/// See the documentation for `torch::nn::MSELossOptions` class to learn what
|
112 |
+
/// arguments are supported.
|
113 |
+
///
|
114 |
+
/// Example:
|
115 |
+
/// ```
|
116 |
+
/// namespace F = torch::nn::functional;
|
117 |
+
/// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
|
118 |
+
/// ```
|
119 |
+
using MSELossFuncOptions = MSELossOptions;
|
120 |
+
} // namespace functional
|
121 |
+
|
122 |
+
// ============================================================================
|
123 |
+
|
124 |
+
/// Options for the `BCELoss` module.
|
125 |
+
///
|
126 |
+
/// Example:
|
127 |
+
/// ```
|
128 |
+
/// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight));
|
129 |
+
/// ```
|
130 |
+
struct TORCH_API BCELossOptions {
|
131 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
132 |
+
reduction_t;
|
133 |
+
|
134 |
+
/// A manual rescaling weight given to the loss of each batch element.
|
135 |
+
TORCH_ARG(Tensor, weight) = {};
|
136 |
+
/// Specifies the reduction to apply to the output.
|
137 |
+
/// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
|
138 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
139 |
+
};
|
140 |
+
|
141 |
+
namespace functional {
|
142 |
+
/// Options for `torch::nn::functional::binary_cross_entropy`.
|
143 |
+
///
|
144 |
+
/// See the documentation for `torch::nn::BCELossOptions` class to learn what
|
145 |
+
/// arguments are supported.
|
146 |
+
///
|
147 |
+
/// Example:
|
148 |
+
/// ```
|
149 |
+
/// namespace F = torch::nn::functional;
|
150 |
+
/// F::binary_cross_entropy(input, target,
|
151 |
+
/// F::BinaryCrossEntropyFuncOptions().weight(weight));
|
152 |
+
/// ```
|
153 |
+
using BinaryCrossEntropyFuncOptions = BCELossOptions;
|
154 |
+
} // namespace functional
|
155 |
+
|
156 |
+
// ============================================================================
|
157 |
+
|
158 |
+
/// Options for the `HingeEmbeddingLoss` module.
|
159 |
+
///
|
160 |
+
/// Example:
|
161 |
+
/// ```
|
162 |
+
/// HingeEmbeddingLoss
|
163 |
+
/// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone));
|
164 |
+
/// ```
|
165 |
+
struct TORCH_API HingeEmbeddingLossOptions {
|
166 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
167 |
+
reduction_t;
|
168 |
+
|
169 |
+
/// Specifies the threshold for which the distance of a negative sample must
|
170 |
+
/// reach in order to incur zero loss. Default: 1
|
171 |
+
TORCH_ARG(double, margin) = 1.0;
|
172 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
173 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
174 |
+
};
|
175 |
+
|
176 |
+
namespace functional {
|
177 |
+
/// Options for `torch::nn::functional::hinge_embedding_loss`.
|
178 |
+
///
|
179 |
+
/// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to
|
180 |
+
/// learn what arguments are supported.
|
181 |
+
///
|
182 |
+
/// Example:
|
183 |
+
/// ```
|
184 |
+
/// namespace F = torch::nn::functional;
|
185 |
+
/// F::hinge_embedding_loss(input, target,
|
186 |
+
/// F::HingeEmbeddingLossFuncOptions().margin(2));
|
187 |
+
/// ```
|
188 |
+
using HingeEmbeddingLossFuncOptions = HingeEmbeddingLossOptions;
|
189 |
+
} // namespace functional
|
190 |
+
|
191 |
+
// ============================================================================
|
192 |
+
|
193 |
+
/// Options for the `MultiMarginLoss` module.
|
194 |
+
///
|
195 |
+
/// Example:
|
196 |
+
/// ```
|
197 |
+
/// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight));
|
198 |
+
/// ```
|
199 |
+
struct TORCH_API MultiMarginLossOptions {
|
200 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
201 |
+
reduction_t;
|
202 |
+
|
203 |
+
/// Has a default value of :math:`1`. :math:`1` and :math:`2`
|
204 |
+
/// are the only supported values.
|
205 |
+
TORCH_ARG(int64_t, p) = 1;
|
206 |
+
/// Has a default value of :math:`1`.
|
207 |
+
TORCH_ARG(double, margin) = 1.0;
|
208 |
+
/// A manual rescaling weight given to each
|
209 |
+
/// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
|
210 |
+
/// treated as if having all ones.
|
211 |
+
TORCH_ARG(Tensor, weight) = Tensor();
|
212 |
+
/// Specifies the reduction to apply to the output:
|
213 |
+
/// ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
|
214 |
+
/// applied,
|
215 |
+
/// ``'mean'``: the sum of the output will be divided by the number of
|
216 |
+
/// elements in the output, ``'sum'``: the output will be summed. Default:
|
217 |
+
/// ``'mean'``
|
218 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
219 |
+
};
|
220 |
+
|
221 |
+
namespace functional {
|
222 |
+
/// Options for `torch::nn::functional::multi_margin_loss`.
|
223 |
+
///
|
224 |
+
/// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn
|
225 |
+
/// what arguments are supported.
|
226 |
+
///
|
227 |
+
/// Example:
|
228 |
+
/// ```
|
229 |
+
/// namespace F = torch::nn::functional;
|
230 |
+
/// F::multi_margin_loss(input, target,
|
231 |
+
/// F::MultiMarginLossFuncOptions().margin(2).weight(weight));
|
232 |
+
/// ```
|
233 |
+
using MultiMarginLossFuncOptions = MultiMarginLossOptions;
|
234 |
+
} // namespace functional
|
235 |
+
|
236 |
+
// ============================================================================
|
237 |
+
|
238 |
+
/// Options for the `CosineEmbeddingLoss` module.
|
239 |
+
///
|
240 |
+
/// Example:
|
241 |
+
/// ```
|
242 |
+
/// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5));
|
243 |
+
/// ```
|
244 |
+
struct TORCH_API CosineEmbeddingLossOptions {
|
245 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
246 |
+
reduction_t;
|
247 |
+
|
248 |
+
/// Specifies the threshold for which the distance of a negative sample must
|
249 |
+
/// reach in order to incur zero loss. Should be a number from -1 to 1, 0
|
250 |
+
/// to 0.5 is suggested. Default: 0.0
|
251 |
+
TORCH_ARG(double, margin) = 0.0;
|
252 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
253 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
254 |
+
};
|
255 |
+
|
256 |
+
namespace functional {
|
257 |
+
/// Options for `torch::nn::functional::cosine_embedding_loss`.
|
258 |
+
///
|
259 |
+
/// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to
|
260 |
+
/// learn what arguments are supported.
|
261 |
+
///
|
262 |
+
/// Example:
|
263 |
+
/// ```
|
264 |
+
/// namespace F = torch::nn::functional;
|
265 |
+
/// F::cosine_embedding_loss(input1, input2, target,
|
266 |
+
/// F::CosineEmbeddingLossFuncOptions().margin(0.5));
|
267 |
+
/// ```
|
268 |
+
using CosineEmbeddingLossFuncOptions = CosineEmbeddingLossOptions;
|
269 |
+
} // namespace functional
|
270 |
+
|
271 |
+
// ============================================================================
|
272 |
+
|
273 |
+
/// Options for the `MultiLabelMarginLoss` module.
|
274 |
+
///
|
275 |
+
/// Example:
|
276 |
+
/// ```
|
277 |
+
/// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone));
|
278 |
+
/// ```
|
279 |
+
struct TORCH_API MultiLabelMarginLossOptions {
|
280 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
281 |
+
reduction_t;
|
282 |
+
|
283 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
|
284 |
+
MultiLabelMarginLossOptions,
|
285 |
+
reduction,
|
286 |
+
kNone,
|
287 |
+
kMean,
|
288 |
+
kSum)
|
289 |
+
|
290 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
291 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
292 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
293 |
+
/// be summed. Default: 'mean'
|
294 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
295 |
+
};
|
296 |
+
|
297 |
+
namespace functional {
|
298 |
+
/// Options for `torch::nn::functional::multilabel_margin_loss`.
|
299 |
+
///
|
300 |
+
/// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to
|
301 |
+
/// learn what arguments are supported.
|
302 |
+
///
|
303 |
+
/// Example:
|
304 |
+
/// ```
|
305 |
+
/// namespace F = torch::nn::functional;
|
306 |
+
/// F::multilabel_margin_loss(input, target,
|
307 |
+
/// F::MultilabelMarginLossFuncOptions(torch::kNone));
|
308 |
+
/// ```
|
309 |
+
using MultilabelMarginLossFuncOptions = MultiLabelMarginLossOptions;
|
310 |
+
} // namespace functional
|
311 |
+
|
312 |
+
// ============================================================================
|
313 |
+
|
314 |
+
/// Options for the `SoftMarginLoss` module.
|
315 |
+
///
|
316 |
+
/// Example:
|
317 |
+
/// ```
|
318 |
+
/// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone));
|
319 |
+
/// ```
|
320 |
+
struct TORCH_API SoftMarginLossOptions {
|
321 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
322 |
+
reduction_t;
|
323 |
+
|
324 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
|
325 |
+
SoftMarginLossOptions,
|
326 |
+
reduction,
|
327 |
+
kNone,
|
328 |
+
kMean,
|
329 |
+
kSum)
|
330 |
+
|
331 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
332 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
333 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
334 |
+
/// be summed. Default: 'mean'
|
335 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
336 |
+
};
|
337 |
+
|
338 |
+
namespace functional {
|
339 |
+
/// Options for `torch::nn::functional::soft_margin_loss`.
|
340 |
+
///
|
341 |
+
/// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn
|
342 |
+
/// what arguments are supported.
|
343 |
+
///
|
344 |
+
/// Example:
|
345 |
+
/// ```
|
346 |
+
/// namespace F = torch::nn::functional;
|
347 |
+
/// F::soft_margin_loss(input, target,
|
348 |
+
/// F::SoftMarginLossFuncOptions(torch::kNone));
|
349 |
+
/// ```
|
350 |
+
using SoftMarginLossFuncOptions = SoftMarginLossOptions;
|
351 |
+
} // namespace functional
|
352 |
+
|
353 |
+
// ============================================================================
|
354 |
+
|
355 |
+
/// Options for the `MultiLabelSoftMarginLoss` module.
|
356 |
+
///
|
357 |
+
/// Example:
|
358 |
+
/// ```
|
359 |
+
/// MultiLabelSoftMarginLoss
|
360 |
+
/// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight));
|
361 |
+
/// ```
|
362 |
+
struct TORCH_API MultiLabelSoftMarginLossOptions {
|
363 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
364 |
+
reduction_t;
|
365 |
+
|
366 |
+
/// A manual rescaling weight given to each
|
367 |
+
/// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
|
368 |
+
/// treated as if having all ones.
|
369 |
+
TORCH_ARG(Tensor, weight) = Tensor();
|
370 |
+
|
371 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
372 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
373 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
374 |
+
/// be summed. Default: 'mean'
|
375 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
376 |
+
};
|
377 |
+
|
378 |
+
namespace functional {
|
379 |
+
/// Options for `torch::nn::functional::multilabel_soft_margin_loss`.
|
380 |
+
///
|
381 |
+
/// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class
|
382 |
+
/// to learn what arguments are supported.
|
383 |
+
///
|
384 |
+
/// Example:
|
385 |
+
/// ```
|
386 |
+
/// namespace F = torch::nn::functional;
|
387 |
+
/// F::multilabel_soft_margin_loss(input, target,
|
388 |
+
/// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
|
389 |
+
/// ```
|
390 |
+
using MultilabelSoftMarginLossFuncOptions = MultiLabelSoftMarginLossOptions;
|
391 |
+
} // namespace functional
|
392 |
+
|
393 |
+
// ============================================================================
|
394 |
+
|
395 |
+
/// Options for the `TripletMarginLoss` module.
|
396 |
+
///
|
397 |
+
/// Example:
|
398 |
+
/// ```
|
399 |
+
/// TripletMarginLoss
|
400 |
+
/// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false));
|
401 |
+
/// ```
|
402 |
+
struct TORCH_API TripletMarginLossOptions {
|
403 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
404 |
+
reduction_t;
|
405 |
+
|
406 |
+
/// Specifies the threshold for which the distance of a negative sample must
|
407 |
+
/// reach in order to incur zero loss. Default: 1
|
408 |
+
TORCH_ARG(double, margin) = 1.0;
|
409 |
+
/// Specifies the norm degree for pairwise distance. Default: 2
|
410 |
+
TORCH_ARG(double, p) = 2.0;
|
411 |
+
TORCH_ARG(double, eps) = 1e-6;
|
412 |
+
/// The distance swap is described in detail in the paper Learning shallow
|
413 |
+
/// convolutional feature descriptors with triplet losses by V. Balntas,
|
414 |
+
/// E. Riba et al. Default: False
|
415 |
+
TORCH_ARG(bool, swap) = false;
|
416 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
417 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
418 |
+
};
|
419 |
+
|
420 |
+
namespace functional {
|
421 |
+
/// Options for `torch::nn::functional::triplet_margin_loss`.
|
422 |
+
///
|
423 |
+
/// See the documentation for `torch::nn::TripletMarginLossOptions` class to
|
424 |
+
/// learn what arguments are supported.
|
425 |
+
///
|
426 |
+
/// Example:
|
427 |
+
/// ```
|
428 |
+
/// namespace F = torch::nn::functional;
|
429 |
+
/// F::triplet_margin_loss(anchor, positive, negative,
|
430 |
+
/// F::TripletMarginLossFuncOptions().margin(1.0));
|
431 |
+
/// ```
|
432 |
+
using TripletMarginLossFuncOptions = TripletMarginLossOptions;
|
433 |
+
} // namespace functional
|
434 |
+
|
435 |
+
// ============================================================================
|
436 |
+
|
437 |
+
/// Options for the `TripletMarginWithDistanceLoss` module.
|
438 |
+
///
|
439 |
+
/// Example:
|
440 |
+
/// ```
|
441 |
+
/// TripletMarginWithDistanceLoss
|
442 |
+
/// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false));
|
443 |
+
/// ```
|
444 |
+
struct TORCH_API TripletMarginWithDistanceLossOptions {
|
445 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
446 |
+
reduction_t;
|
447 |
+
typedef std::function<Tensor(const Tensor&, const Tensor&)>
|
448 |
+
distance_function_t;
|
449 |
+
|
450 |
+
/// Specifies a nonnegative, real-valued function that quantifies the
|
451 |
+
/// closeness of two tensors. If not specified, `F::pairwise_distance` will
|
452 |
+
/// be used. Default: nullopt
|
453 |
+
TORCH_ARG(c10::optional<distance_function_t>, distance_function) =
|
454 |
+
c10::nullopt;
|
455 |
+
/// Specifies a nonnegative margin representing the minimum difference
|
456 |
+
/// between the positive and negative distances required for the loss to be 0.
|
457 |
+
/// Larger margins penalize cases where the negative examples are not distance
|
458 |
+
/// enough from the anchors, relative to the positives. Default: 1
|
459 |
+
TORCH_ARG(double, margin) = 1.0;
|
460 |
+
/// Whether to use the distance swap described in the paper Learning shallow
|
461 |
+
/// convolutional feature descriptors with triplet losses by V. Balntas,
|
462 |
+
/// E. Riba et al. If True, and if the positive example is closer to the
|
463 |
+
/// negative example than the anchor is, swaps the positive example and the
|
464 |
+
/// anchor in the loss computation. Default: False
|
465 |
+
TORCH_ARG(bool, swap) = false;
|
466 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
467 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
468 |
+
};
|
469 |
+
|
470 |
+
namespace functional {
|
471 |
+
/// Options for `torch::nn::functional::triplet_margin_with_distance_loss`.
|
472 |
+
///
|
473 |
+
/// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions`
|
474 |
+
/// class to learn what arguments are supported.
|
475 |
+
///
|
476 |
+
/// Example:
|
477 |
+
/// ```
|
478 |
+
/// namespace F = torch::nn::functional;
|
479 |
+
/// F::triplet_margin_with_distance_loss(anchor, positive, negative,
|
480 |
+
/// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
|
481 |
+
/// ```
|
482 |
+
using TripletMarginWithDistanceLossFuncOptions =
|
483 |
+
TripletMarginWithDistanceLossOptions;
|
484 |
+
} // namespace functional
|
485 |
+
|
486 |
+
// ============================================================================
|
487 |
+
|
488 |
+
/// Options for the `CTCLoss` module.
|
489 |
+
///
|
490 |
+
/// Example:
|
491 |
+
/// ```
|
492 |
+
/// CTCLoss
|
493 |
+
/// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum));
|
494 |
+
/// ```
|
495 |
+
struct TORCH_API CTCLossOptions {
|
496 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
497 |
+
reduction_t;
|
498 |
+
|
499 |
+
/// blank label. Default `0`.
|
500 |
+
TORCH_ARG(int64_t, blank) = 0;
|
501 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
502 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
503 |
+
/// Whether to zero infinite losses and the associated gradients.
|
504 |
+
/// Default: `false`. Infinite losses mainly occur when the inputs are
|
505 |
+
/// too short to be aligned to the targets.
|
506 |
+
TORCH_ARG(bool, zero_infinity) = false;
|
507 |
+
};
|
508 |
+
|
509 |
+
namespace functional {
|
510 |
+
/// Options for `torch::nn::functional::ctc_loss`.
|
511 |
+
///
|
512 |
+
/// See the documentation for `torch::nn::CTCLossOptions` class to learn what
|
513 |
+
/// arguments are supported.
|
514 |
+
///
|
515 |
+
/// Example:
|
516 |
+
/// ```
|
517 |
+
/// namespace F = torch::nn::functional;
|
518 |
+
/// F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
|
519 |
+
/// F::CTCLossFuncOptions().reduction(torch::kNone));
|
520 |
+
/// ```
|
521 |
+
using CTCLossFuncOptions = CTCLossOptions;
|
522 |
+
} // namespace functional
|
523 |
+
|
524 |
+
// ============================================================================
|
525 |
+
|
526 |
+
/// Options for the `SmoothL1Loss` module.
|
527 |
+
///
|
528 |
+
/// Example:
|
529 |
+
/// ```
|
530 |
+
/// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5));
|
531 |
+
/// ```
|
532 |
+
struct TORCH_API SmoothL1LossOptions {
|
533 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
534 |
+
reduction_t;
|
535 |
+
|
536 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
|
537 |
+
SmoothL1LossOptions,
|
538 |
+
reduction,
|
539 |
+
kNone,
|
540 |
+
kMean,
|
541 |
+
kSum)
|
542 |
+
|
543 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
544 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
545 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
546 |
+
/// be summed. Default: 'mean'
|
547 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
548 |
+
/// Specifies the threshold at which to change between L1 and L2 loss.
|
549 |
+
/// If beta is not specified, a value of 1.0 will be used.
|
550 |
+
/// Default: nullopt
|
551 |
+
TORCH_ARG(c10::optional<double>, beta) = c10::nullopt;
|
552 |
+
};
|
553 |
+
|
554 |
+
namespace functional {
|
555 |
+
/// Options for `torch::nn::functional::smooth_l1_loss`.
|
556 |
+
///
|
557 |
+
/// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn
|
558 |
+
/// what arguments are supported.
|
559 |
+
///
|
560 |
+
/// Example:
|
561 |
+
/// ```
|
562 |
+
/// namespace F = torch::nn::functional;
|
563 |
+
/// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
|
564 |
+
/// ```
|
565 |
+
using SmoothL1LossFuncOptions = SmoothL1LossOptions;
|
566 |
+
} // namespace functional
|
567 |
+
|
568 |
+
// ============================================================================
|
569 |
+
|
570 |
+
/// Options for the `HuberLoss` module.
|
571 |
+
///
|
572 |
+
/// Example:
|
573 |
+
/// ```
|
574 |
+
/// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5));
|
575 |
+
/// ```
|
576 |
+
struct TORCH_API HuberLossOptions {
|
577 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
578 |
+
reduction_t;
|
579 |
+
|
580 |
+
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
|
581 |
+
HuberLossOptions,
|
582 |
+
reduction,
|
583 |
+
kNone,
|
584 |
+
kMean,
|
585 |
+
kSum)
|
586 |
+
|
587 |
+
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
|
588 |
+
/// 'none': no reduction will be applied, 'mean': the sum of the output will
|
589 |
+
/// be divided by the number of elements in the output, 'sum': the output will
|
590 |
+
/// be summed. Default: 'mean'
|
591 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
592 |
+
/// Specifies the threshold at which to change between L1 and L2 loss.
|
593 |
+
/// Default: 1.0
|
594 |
+
TORCH_ARG(double, delta) = 1.0;
|
595 |
+
};
|
596 |
+
|
597 |
+
namespace functional {
|
598 |
+
/// Options for `torch::nn::functional::huber_loss`.
|
599 |
+
///
|
600 |
+
/// See the documentation for `torch::nn::HuberLossOptions` class to learn what
|
601 |
+
/// arguments are supported.
|
602 |
+
///
|
603 |
+
/// Example:
|
604 |
+
/// ```
|
605 |
+
/// namespace F = torch::nn::functional;
|
606 |
+
/// F::huber_loss(input, target, F::HuberLossFuncOptions(torch::kNone));
|
607 |
+
/// ```
|
608 |
+
using HuberLossFuncOptions = HuberLossOptions;
|
609 |
+
} // namespace functional
|
610 |
+
|
611 |
+
// ============================================================================
|
612 |
+
|
613 |
+
/// Options for the `PoissonNLLLoss` module.
|
614 |
+
///
|
615 |
+
/// Example:
|
616 |
+
/// ```
|
617 |
+
/// PoissonNLLLoss
|
618 |
+
/// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum));
|
619 |
+
/// ```
|
620 |
+
struct TORCH_API PoissonNLLLossOptions {
|
621 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
622 |
+
reduction_t;
|
623 |
+
|
624 |
+
/// if true the loss is computed as `exp(input) - target * input`,
|
625 |
+
/// if false the loss is `input - target * log(input + eps)`.
|
626 |
+
TORCH_ARG(bool, log_input) = true;
|
627 |
+
/// whether to compute full loss, i.e. to add the Stirling approximation term
|
628 |
+
/// target * log(target) - target + 0.5 * log(2 * pi * target).
|
629 |
+
TORCH_ARG(bool, full) = false;
|
630 |
+
/// Small value to avoid evaluation of `log(0)` when `log_input = false`.
|
631 |
+
/// Default: 1e-8
|
632 |
+
TORCH_ARG(double, eps) = 1e-8;
|
633 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
634 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
635 |
+
};
|
636 |
+
|
637 |
+
namespace functional {
|
638 |
+
/// Options for `torch::nn::functional::poisson_nll_loss`.
|
639 |
+
///
|
640 |
+
/// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn
|
641 |
+
/// what arguments are supported.
|
642 |
+
///
|
643 |
+
/// Example:
|
644 |
+
/// ```
|
645 |
+
/// namespace F = torch::nn::functional;
|
646 |
+
/// F::poisson_nll_loss(input, target,
|
647 |
+
/// F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
|
648 |
+
/// ```
|
649 |
+
using PoissonNLLLossFuncOptions = PoissonNLLLossOptions;
|
650 |
+
} // namespace functional
|
651 |
+
|
652 |
+
// ============================================================================
|
653 |
+
|
654 |
+
/// Options for the `MarginRankingLoss` module.
|
655 |
+
///
|
656 |
+
/// Example:
|
657 |
+
/// ```
|
658 |
+
/// MarginRankingLoss
|
659 |
+
/// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum));
|
660 |
+
/// ```
|
661 |
+
struct TORCH_API MarginRankingLossOptions {
|
662 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
663 |
+
reduction_t;
|
664 |
+
|
665 |
+
/// Has a default value of `0`.
|
666 |
+
TORCH_ARG(double, margin) = 0;
|
667 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
668 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
669 |
+
};
|
670 |
+
|
671 |
+
namespace functional {
|
672 |
+
/// Options for `torch::nn::functional::margin_ranking_loss`.
|
673 |
+
///
|
674 |
+
/// See the documentation for `torch::nn::MarginRankingLossOptions` class to
|
675 |
+
/// learn what arguments are supported.
|
676 |
+
///
|
677 |
+
/// Example:
|
678 |
+
/// ```
|
679 |
+
/// namespace F = torch::nn::functional;
|
680 |
+
/// F::margin_ranking_loss(input1, input2, target,
|
681 |
+
/// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
|
682 |
+
/// ```
|
683 |
+
using MarginRankingLossFuncOptions = MarginRankingLossOptions;
|
684 |
+
} // namespace functional
|
685 |
+
|
686 |
+
// ============================================================================
|
687 |
+
|
688 |
+
/// Options for the `NLLLoss` module.
|
689 |
+
///
|
690 |
+
/// Example:
|
691 |
+
/// ```
|
692 |
+
/// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean));
|
693 |
+
/// ```
|
694 |
+
struct TORCH_API NLLLossOptions {
|
695 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
696 |
+
reduction_t;
|
697 |
+
|
698 |
+
/// A manual rescaling weight given to each
|
699 |
+
/// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
|
700 |
+
/// treated as if having all ones.
|
701 |
+
TORCH_ARG(Tensor, weight) = {};
|
702 |
+
/// Specifies a target value that is ignored
|
703 |
+
/// and does not contribute to the input gradient.
|
704 |
+
TORCH_ARG(int64_t, ignore_index) = -100;
|
705 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
706 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
707 |
+
};
|
708 |
+
|
709 |
+
namespace functional {
|
710 |
+
/// Options for `torch::nn::functional::nll_loss`.
|
711 |
+
///
|
712 |
+
/// See the documentation for `torch::nn::NLLLossOptions` class to learn what
|
713 |
+
/// arguments are supported.
|
714 |
+
///
|
715 |
+
/// Example:
|
716 |
+
/// ```
|
717 |
+
/// namespace F = torch::nn::functional;
|
718 |
+
/// F::nll_loss(input, target,
|
719 |
+
/// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
|
720 |
+
/// ```
|
721 |
+
using NLLLossFuncOptions = NLLLossOptions;
|
722 |
+
} // namespace functional
|
723 |
+
|
724 |
+
// ============================================================================
|
725 |
+
|
726 |
+
/// Options for the `CrossEntropyLoss` module.
|
727 |
+
///
|
728 |
+
/// Example:
|
729 |
+
/// ```
|
730 |
+
/// CrossEntropyLoss
|
731 |
+
/// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean));
|
732 |
+
/// ```
|
733 |
+
struct TORCH_API CrossEntropyLossOptions {
|
734 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
735 |
+
reduction_t;
|
736 |
+
|
737 |
+
/// A manual rescaling weight given to each class. If given, has to be a
|
738 |
+
/// Tensor of size C
|
739 |
+
TORCH_ARG(Tensor, weight) = {};
|
740 |
+
/// Specifies a target value that is ignored
|
741 |
+
/// and does not contribute to the input gradient.
|
742 |
+
TORCH_ARG(int64_t, ignore_index) = -100;
|
743 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
744 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
745 |
+
/// Specifies the amount of smoothing when computing the loss. Default: 0.0
|
746 |
+
TORCH_ARG(double, label_smoothing) = 0.0;
|
747 |
+
};
|
748 |
+
|
749 |
+
namespace functional {
|
750 |
+
/// Options for `torch::nn::functional::cross_entropy`.
|
751 |
+
///
|
752 |
+
/// See the documentation for `torch::nn::CrossEntropyLossOptions` class to
|
753 |
+
/// learn what arguments are supported.
|
754 |
+
///
|
755 |
+
/// Example:
|
756 |
+
/// ```
|
757 |
+
/// namespace F = torch::nn::functional;
|
758 |
+
/// F::cross_entropy(input, target,
|
759 |
+
/// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
|
760 |
+
/// ```
|
761 |
+
using CrossEntropyFuncOptions = CrossEntropyLossOptions;
|
762 |
+
} // namespace functional
|
763 |
+
|
764 |
+
// ============================================================================
|
765 |
+
|
766 |
+
/// Options for the `BCEWithLogitsLoss` module.
|
767 |
+
///
|
768 |
+
/// Example:
|
769 |
+
/// ```
|
770 |
+
/// BCEWithLogitsLoss
|
771 |
+
/// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight));
|
772 |
+
/// ```
|
773 |
+
struct TORCH_API BCEWithLogitsLossOptions {
|
774 |
+
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
|
775 |
+
reduction_t;
|
776 |
+
/// A manual rescaling weight given to the loss of each batch element.
|
777 |
+
/// If given, has to be a Tensor of size `nbatch`.
|
778 |
+
TORCH_ARG(Tensor, weight) = {};
|
779 |
+
/// Specifies the reduction to apply to the output. Default: Mean
|
780 |
+
TORCH_ARG(reduction_t, reduction) = torch::kMean;
|
781 |
+
/// A weight of positive examples.
|
782 |
+
/// Must be a vector with length equal to the number of classes.
|
783 |
+
TORCH_ARG(Tensor, pos_weight) = {};
|
784 |
+
};
|
785 |
+
|
786 |
+
namespace functional {
|
787 |
+
/// Options for `torch::nn::functional::binary_cross_entropy_with_logits`.
|
788 |
+
///
|
789 |
+
/// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to
|
790 |
+
/// learn what arguments are supported.
|
791 |
+
///
|
792 |
+
/// Example:
|
793 |
+
/// ```
|
794 |
+
/// namespace F = torch::nn::functional;
|
795 |
+
/// F::binary_cross_entropy_with_logits(input, target,
|
796 |
+
/// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
|
797 |
+
/// ```
|
798 |
+
using BinaryCrossEntropyWithLogitsFuncOptions = BCEWithLogitsLossOptions;
|
799 |
+
} // namespace functional
|
800 |
+
|
801 |
+
} // namespace nn
|
802 |
+
} // namespace torch
|
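The loss options declared in the header above all share the same `kNone`/`kMean`/`kSum` reduction variant (with `kBatchMean` added for `KLDivLoss`). A hedged sketch of the two common call styles, with illustrative shapes:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;

  auto input = torch::randn({3, 5}, torch::requires_grad());
  auto target = torch::randn({3, 5});

  // Module form: disable the reduction to keep the per-element losses.
  torch::nn::L1Loss per_element(torch::nn::L1LossOptions(torch::kNone));
  auto l_none = per_element(input, target);  // shape [3, 5]

  // Functional form: the default torch::kMean reduction yields a scalar.
  auto l_mean =
      F::mse_loss(input, target, F::MSELossFuncOptions(torch::kMean));

  std::cout << l_none.sizes() << " " << l_mean.item<float>() << std::endl;
}
```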
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h
ADDED
@@ -0,0 +1,192 @@
#pragma once

#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>
#include <vector>

namespace torch {
namespace nn {

/// Options for the `LayerNorm` module.
///
/// Example:
/// ```
/// LayerNorm model(LayerNormOptions({2,
/// 2}).elementwise_affine(false).eps(2e-5));
/// ```
struct TORCH_API LayerNormOptions {
  /* implicit */ LayerNormOptions(std::vector<int64_t> normalized_shape);
  /// input shape from an expected input.
  TORCH_ARG(std::vector<int64_t>, normalized_shape);
  /// a value added to the denominator for numerical stability. ``Default:
  /// 1e-5``.
  TORCH_ARG(double, eps) = 1e-5;
  /// a boolean value that when set to ``true``, this module
  /// has learnable per-element affine parameters initialized to ones (for
  /// weights) and zeros (for biases). ``Default: true``.
  TORCH_ARG(bool, elementwise_affine) = true;
};

// ============================================================================

namespace functional {

/// Options for `torch::nn::functional::layer_norm`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5));
/// ```
struct TORCH_API LayerNormFuncOptions {
  /* implicit */ LayerNormFuncOptions(std::vector<int64_t> normalized_shape);
  /// input shape from an expected input.
  TORCH_ARG(std::vector<int64_t>, normalized_shape);

  TORCH_ARG(Tensor, weight) = {};

  TORCH_ARG(Tensor, bias) = {};

  /// a value added to the denominator for numerical stability. ``Default:
  /// 1e-5``.
  TORCH_ARG(double, eps) = 1e-5;
};

} // namespace functional

// ============================================================================

/// Options for the `LocalResponseNorm` module.
///
/// Example:
/// ```
/// LocalResponseNorm
/// model(LocalResponseNormOptions(2).alpha(0.0002).beta(0.85).k(2.));
/// ```
struct TORCH_API LocalResponseNormOptions {
  /* implicit */ LocalResponseNormOptions(int64_t size) : size_(size) {}
  /// amount of neighbouring channels used for normalization
  TORCH_ARG(int64_t, size);

  /// multiplicative factor. Default: 1e-4
  TORCH_ARG(double, alpha) = 1e-4;

  /// exponent. Default: 0.75
  TORCH_ARG(double, beta) = 0.75;

  /// additive factor. Default: 1
  TORCH_ARG(double, k) = 1.;
};

namespace functional {
/// Options for `torch::nn::functional::local_response_norm`.
///
/// See the documentation for `torch::nn::LocalResponseNormOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
/// ```
using LocalResponseNormFuncOptions = LocalResponseNormOptions;
} // namespace functional

// ============================================================================

/// Options for the `CrossMapLRN2d` module.
///
/// Example:
/// ```
/// CrossMapLRN2d model(CrossMapLRN2dOptions(3).alpha(1e-5).beta(0.1).k(10));
/// ```
struct TORCH_API CrossMapLRN2dOptions {
  CrossMapLRN2dOptions(int64_t size);

  TORCH_ARG(int64_t, size);

  TORCH_ARG(double, alpha) = 1e-4;

  TORCH_ARG(double, beta) = 0.75;

  TORCH_ARG(int64_t, k) = 1;
};

// ============================================================================

namespace functional {

/// Options for `torch::nn::functional::normalize`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1));
/// ```
struct TORCH_API NormalizeFuncOptions {
  /// The exponent value in the norm formulation. Default: 2.0
  TORCH_ARG(double, p) = 2.0;
  /// The dimension to reduce. Default: 1
  TORCH_ARG(int64_t, dim) = 1;
  /// Small value to avoid division by zero. Default: 1e-12
  TORCH_ARG(double, eps) = 1e-12;
  /// the output tensor. If `out` is used, this
  /// operation won't be differentiable.
  TORCH_ARG(c10::optional<Tensor>, out) = c10::nullopt;
};

} // namespace functional

// ============================================================================

/// Options for the `GroupNorm` module.
///
/// Example:
/// ```
/// GroupNorm model(GroupNormOptions(2, 2).eps(2e-5).affine(false));
/// ```
struct TORCH_API GroupNormOptions {
  /* implicit */ GroupNormOptions(int64_t num_groups, int64_t num_channels);

  /// number of groups to separate the channels into
  TORCH_ARG(int64_t, num_groups);
  /// number of channels expected in input
  TORCH_ARG(int64_t, num_channels);
  /// a value added to the denominator for numerical stability. Default: 1e-5
  TORCH_ARG(double, eps) = 1e-5;
  /// a boolean value that when set to ``true``, this module
  /// has learnable per-channel affine parameters initialized to ones (for
  /// weights) and zeros (for biases). Default: ``true``.
  TORCH_ARG(bool, affine) = true;
};

// ============================================================================

namespace functional {

/// Options for `torch::nn::functional::group_norm`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5));
/// ```
struct TORCH_API GroupNormFuncOptions {
  /* implicit */ GroupNormFuncOptions(int64_t num_groups);

  /// number of groups to separate the channels into
  TORCH_ARG(int64_t, num_groups);

  TORCH_ARG(Tensor, weight) = {};

  TORCH_ARG(Tensor, bias) = {};

  /// a value added to the denominator for numerical stability. Default: 1e-5
  TORCH_ARG(double, eps) = 1e-5;
};

} // namespace functional

} // namespace nn
} // namespace torch
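Editor's sketch (not part of the uploaded file): how the module-style and functional-style options declared above are commonly combined, assuming libtorch is available; shapes are arbitrary examples.
```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  // Module form: LayerNorm over the trailing (2, 2) dims of a (N, 2, 2) input.
  torch::nn::LayerNorm layer_norm(
      torch::nn::LayerNormOptions({2, 2}).eps(2e-5).elementwise_affine(false));
  auto x = torch::randn({4, 2, 2});
  auto y = layer_norm(x);

  // Functional form: group_norm with 2 groups over a (N, C, H, W) input,
  // where C is divisible by the number of groups.
  auto img = torch::randn({4, 6, 8, 8});
  auto z = F::group_norm(img, F::GroupNormFuncOptions(2).eps(2e-5));
  std::cout << y.sizes() << " " << z.sizes() << '\n';
}
```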
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h
ADDED
@@ -0,0 +1,219 @@
#pragma once

#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/expanding_array.h>
#include <torch/types.h>

namespace torch {
namespace nn {

/// Options for a `D`-dimensional ReflectionPad module.
template <size_t D>
struct TORCH_API ReflectionPadOptions {
  ReflectionPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}

  /// The size of the padding.
  /// If it is `int`, uses the same padding in all boundaries.
  /// If it is a 2-`tuple` (for ReflectionPad1d), uses (padding_left,
  /// padding_right). If it is a 4-`tuple` (for ReflectionPad2d), uses
  /// (padding_left, padding_right, padding_top, padding_bottom). If it is a
  /// 6-`tuple` (for ReflectionPad3d), uses (padding_left, padding_right,
  /// padding_top, padding_bottom, padding_front, padding_back).

  TORCH_ARG(ExpandingArray<D * 2>, padding);
};

/// `ReflectionPadOptions` specialized for the `ReflectionPad1d` module.
///
/// Example:
/// ```
/// ReflectionPad1d model(ReflectionPad1dOptions({3, 1}));
/// ```
using ReflectionPad1dOptions = ReflectionPadOptions<1>;

/// `ReflectionPadOptions` specialized for the `ReflectionPad2d` module.
///
/// Example:
/// ```
/// ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0}));
/// ```
using ReflectionPad2dOptions = ReflectionPadOptions<2>;

/// `ReflectionPadOptions` specialized for the `ReflectionPad3d` module.
///
/// Example:
/// ```
/// ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 1}));
/// ```
using ReflectionPad3dOptions = ReflectionPadOptions<3>;

// ============================================================================

/// Options for a `D`-dimensional ReplicationPad module.
template <size_t D>
struct TORCH_API ReplicationPadOptions {
  ReplicationPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}

  /// The size of the padding.
  /// - If it is `int`, uses the same padding in all boundaries.
  /// - If it is a 2-`tuple` (for ReplicationPad1d), uses (padding_left,
  /// padding_right).
  /// - If it is a 4-`tuple` (for ReplicationPad2d), uses (padding_left,
  /// padding_right, padding_top, padding_bottom).
  /// - If it is a 6-`tuple` (for ReplicationPad3d), uses
  /// (padding_left, padding_right, padding_top, padding_bottom,
  /// padding_front, padding_back).
  TORCH_ARG(ExpandingArray<D * 2>, padding);
};

/// `ReplicationPadOptions` specialized for the `ReplicationPad1d` module.
///
/// Example:
/// ```
/// ReplicationPad1d model(ReplicationPad1dOptions({3, 1}));
/// ```
using ReplicationPad1dOptions = ReplicationPadOptions<1>;

/// `ReplicationPadOptions` specialized for the `ReplicationPad2d` module.
///
/// Example:
/// ```
/// ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0}));
/// ```
using ReplicationPad2dOptions = ReplicationPadOptions<2>;

/// `ReplicationPadOptions` specialized for the `ReplicationPad3d` module.
///
/// Example:
/// ```
/// ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
/// ```
using ReplicationPad3dOptions = ReplicationPadOptions<3>;

// ============================================================================

template <size_t D>
struct TORCH_API ZeroPadOptions {
  ZeroPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}

  /// The size of the padding.
  /// - If it is `int`, uses the same padding in all boundaries.
  /// - If it is a 2-`tuple` (for ZeroPad1d), uses (padding_left,
  /// padding_right).
  /// - If it is a 4-`tuple` (for ZeroPad2d), uses (padding_left, padding_right,
  /// padding_top, padding_bottom).
  /// - If it is a 6-`tuple` (for ZeroPad3d), uses
  /// (padding_left, padding_right, padding_top, padding_bottom,
  /// padding_front, padding_back).
  TORCH_ARG(ExpandingArray<D * 2>, padding);
};

/// `ZeroPadOptions` specialized for the `ZeroPad1d` module.
///
/// Example:
/// ```
/// ConstantPad1d model(ConstantPad1dOptions({3, 1});
/// ```
using ZeroPad1dOptions = ZeroPadOptions<1>;

/// `ZeroPadOptions` specialized for the `ZeroPad2d` module.
///
/// Example:
/// ```
/// ConstantPad2d model(ConstantPad2dOptions({1, 1, 2, 0});
/// ```
using ZeroPad2dOptions = ZeroPadOptions<2>;

/// `ZeroPadOptions` specialized for the `ZeroPad3d` module.
///
/// Example:
/// ```
/// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2});
/// ```
using ZeroPad3dOptions = ZeroPadOptions<3>;

// ============================================================================

/// Options for a `D`-dimensional ConstantPad module.
template <size_t D>
struct TORCH_API ConstantPadOptions {
  ConstantPadOptions(ExpandingArray<D * 2> padding, double value)
      : padding_(padding), value_(value) {}

  /// The size of the padding.
  /// - If it is `int`, uses the same padding in all boundaries.
  /// - If it is a 2-`tuple` (for ConstantPad1d), uses (padding_left,
  /// padding_right).
  /// - If it is a 4-`tuple` (for ConstantPad2d), uses (padding_left,
  /// padding_right, padding_top, padding_bottom).
  /// - If it is a 6-`tuple` (for ConstantPad3d), uses
  /// (padding_left, padding_right, padding_top, padding_bottom,
  /// padding_front, padding_back).
  TORCH_ARG(ExpandingArray<D * 2>, padding);

  /// Fill value for constant padding.
  TORCH_ARG(double, value);
};

/// `ConstantPadOptions` specialized for the `ConstantPad1d` module.
///
/// Example:
/// ```
/// ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5));
/// ```
using ConstantPad1dOptions = ConstantPadOptions<1>;

/// `ConstantPadOptions` specialized for the `ConstantPad2d` module.
///
/// Example:
/// ```
/// ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5));
/// ```
using ConstantPad2dOptions = ConstantPadOptions<2>;

/// `ConstantPadOptions` specialized for the `ConstantPad3d` module.
///
/// Example:
/// ```
/// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5));
/// ```
using ConstantPad3dOptions = ConstantPadOptions<3>;

// ============================================================================

namespace functional {

/// Options for `torch::nn::functional::pad`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1,
/// 2}).mode(torch::kReplicate));
/// ```
struct TORCH_API PadFuncOptions {
  typedef std::variant<
      enumtype::kConstant,
      enumtype::kReflect,
      enumtype::kReplicate,
      enumtype::kCircular>
      mode_t;

  PadFuncOptions(std::vector<int64_t> pad);

  /// m-elements tuple, where m/2 <= input dimensions and m is even.
  TORCH_ARG(std::vector<int64_t>, pad);

  /// "constant", "reflect", "replicate" or "circular". Default: "constant"
  TORCH_ARG(mode_t, mode) = torch::kConstant;

  /// fill value for "constant" padding. Default: 0
  TORCH_ARG(double, value) = 0;
};

} // namespace functional

} // namespace nn
} // namespace torch
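A short usage sketch of the padding options above (not part of the uploaded header), assuming a linked libtorch build; the 4-D input shape and pad amounts are arbitrary.
```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 3, 8, 8});
  // Functional form: pad the last dim by (1, 2) and the second-to-last by
  // (2, 1), replicating edge values instead of filling with a constant.
  auto padded =
      F::pad(x, F::PadFuncOptions({1, 2, 2, 1}).mode(torch::kReplicate));
  // Module form: reflection padding specified per side of the last two dims.
  torch::nn::ReflectionPad2d reflect(
      torch::nn::ReflectionPad2dOptions({1, 1, 2, 0}));
  auto reflected = reflect(x);
  std::cout << padded.sizes() << " " << reflected.sizes() << '\n';
}
```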
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h
ADDED
@@ -0,0 +1,65 @@
#pragma once

#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>

namespace torch {
namespace nn {

/// Options for the `PixelShuffle` module.
///
/// Example:
/// ```
/// PixelShuffle model(PixelShuffleOptions(5));
/// ```
struct TORCH_API PixelShuffleOptions {
  PixelShuffleOptions(int64_t upscale_factor)
      : upscale_factor_(upscale_factor) {}

  /// Factor to increase spatial resolution by
  TORCH_ARG(int64_t, upscale_factor);
};

/// Options for the `PixelUnshuffle` module.
///
/// Example:
/// ```
/// PixelUnshuffle model(PixelUnshuffleOptions(5));
/// ```
struct TORCH_API PixelUnshuffleOptions {
  /* implicit */ PixelUnshuffleOptions(int64_t downscale_factor)
      : downscale_factor_(downscale_factor) {}

  /// Factor to decrease spatial resolution by
  TORCH_ARG(int64_t, downscale_factor);
};

namespace functional {
/// Options for `torch::nn::functional::pixel_shuffle`.
///
/// See the documentation for `torch::nn::PixelShuffleOptions` class to learn
/// what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
/// ```
using PixelShuffleFuncOptions = PixelShuffleOptions;

/// Options for `torch::nn::functional::pixel_unshuffle`.
///
/// See the documentation for `torch::nn::PixelUnshuffleOptions` class to learn
/// what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::pixel_unshuffle(x, F::PixelUnshuffleFuncOptions(2));
/// ```
using PixelUnshuffleFuncOptions = PixelUnshuffleOptions;
} // namespace functional

} // namespace nn
} // namespace torch
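A round-trip sketch for the shuffle options above (not part of the uploaded header), assuming libtorch; the channel count must be divisible by the square of the factor.
```
#include <torch/torch.h>
#include <iostream>

int main() {
  // PixelShuffle rearranges (N, C*r^2, H, W) into (N, C, H*r, W*r); here r = 2.
  torch::nn::PixelShuffle shuffle(torch::nn::PixelShuffleOptions(2));
  torch::nn::PixelUnshuffle unshuffle(torch::nn::PixelUnshuffleOptions(2));
  auto x = torch::randn({1, 8, 4, 4});
  auto up = shuffle(x);       // -> (1, 2, 8, 8)
  auto back = unshuffle(up);  // -> (1, 8, 4, 4)
  std::cout << up.sizes() << " " << back.sizes() << '\n';
}
```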
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h
ADDED
@@ -0,0 +1,573 @@
#pragma once

#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/expanding_array.h>
#include <torch/types.h>

namespace torch {
namespace nn {

/// Options for a `D`-dimensional avgpool module.
template <size_t D>
struct AvgPoolOptions {
  AvgPoolOptions(ExpandingArray<D> kernel_size)
      : kernel_size_(kernel_size), stride_(kernel_size) {}

  /// the size of the window to take an average over
  TORCH_ARG(ExpandingArray<D>, kernel_size);

  /// the stride of the window. Default value is `kernel_size`
  TORCH_ARG(ExpandingArray<D>, stride);

  /// implicit zero padding to be added on both sides
  TORCH_ARG(ExpandingArray<D>, padding) = 0;

  /// when True, will use `ceil` instead of `floor` to compute the output shape
  TORCH_ARG(bool, ceil_mode) = false;

  /// when True, will include the zero-padding in the averaging calculation
  TORCH_ARG(bool, count_include_pad) = true;

  /// if specified, it will be used as divisor, otherwise size of the pooling
  /// region will be used.

  TORCH_ARG(c10::optional<int64_t>, divisor_override) = c10::nullopt;
};

/// `AvgPoolOptions` specialized for the `AvgPool1d` module.
///
/// Example:
/// ```
/// AvgPool1d model(AvgPool1dOptions(3).stride(2));
/// ```
using AvgPool1dOptions = AvgPoolOptions<1>;

/// `AvgPoolOptions` specialized for the `AvgPool2d` module.
///
/// Example:
/// ```
/// AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2}));
/// ```
using AvgPool2dOptions = AvgPoolOptions<2>;

/// `AvgPoolOptions` specialized for the `AvgPool3d` module.
///
/// Example:
/// ```
/// AvgPool3d model(AvgPool3dOptions(5).stride(2));
/// ```
using AvgPool3dOptions = AvgPoolOptions<3>;

namespace functional {
/// Options for `torch::nn::functional::avg_pool1d`.
///
/// See the documentation for `torch::nn::AvgPool1dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
/// ```
using AvgPool1dFuncOptions = AvgPool1dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::avg_pool2d`.
///
/// See the documentation for `torch::nn::AvgPool2dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
/// ```
using AvgPool2dFuncOptions = AvgPool2dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::avg_pool3d`.
///
/// See the documentation for `torch::nn::AvgPool3dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
/// ```
using AvgPool3dFuncOptions = AvgPool3dOptions;
} // namespace functional

// ============================================================================

/// Options for a `D`-dimensional maxpool module.
template <size_t D>
struct MaxPoolOptions {
  MaxPoolOptions(ExpandingArray<D> kernel_size)
      : kernel_size_(kernel_size), stride_(kernel_size) {}

  /// the size of the window to take a max over
  TORCH_ARG(ExpandingArray<D>, kernel_size);

  /// the stride of the window. Default value is `kernel_size
  TORCH_ARG(ExpandingArray<D>, stride);

  /// implicit zero padding to be added on both sides
  TORCH_ARG(ExpandingArray<D>, padding) = 0;

  /// a parameter that controls the stride of elements in the window
  TORCH_ARG(ExpandingArray<D>, dilation) = 1;

  /// when True, will use `ceil` instead of `floor` to compute the output shape
  TORCH_ARG(bool, ceil_mode) = false;
};

/// `MaxPoolOptions` specialized for the `MaxPool1d` module.
///
/// Example:
/// ```
/// MaxPool1d model(MaxPool1dOptions(3).stride(2));
/// ```
using MaxPool1dOptions = MaxPoolOptions<1>;

/// `MaxPoolOptions` specialized for the `MaxPool2d` module.
///
/// Example:
/// ```
/// MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2}));
/// ```
using MaxPool2dOptions = MaxPoolOptions<2>;

/// `MaxPoolOptions` specialized for the `MaxPool3d` module.
///
/// Example:
/// ```
/// MaxPool3d model(MaxPool3dOptions(3).stride(2));
/// ```
using MaxPool3dOptions = MaxPoolOptions<3>;

namespace functional {
/// Options for `torch::nn::functional::max_pool1d` and
/// `torch::nn::functional::max_pool1d_with_indices`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
/// ```
using MaxPool1dFuncOptions = MaxPool1dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::max_pool2d` and
/// `torch::nn::functional::max_pool2d_with_indices`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
/// ```
using MaxPool2dFuncOptions = MaxPool2dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::max_pool3d` and
/// `torch::nn::functional::max_pool3d_with_indices`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
/// ```
using MaxPool3dFuncOptions = MaxPool3dOptions;
} // namespace functional

// ============================================================================

/// Options for a `D`-dimensional adaptive maxpool module.
template <typename output_size_t>
struct AdaptiveMaxPoolOptions {
  AdaptiveMaxPoolOptions(output_size_t output_size)
      : output_size_(output_size) {}

  /// the target output size
  TORCH_ARG(output_size_t, output_size);
};

/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool1d` module.
///
/// Example:
/// ```
/// AdaptiveMaxPool1d model(AdaptiveMaxPool1dOptions(3));
/// ```
using AdaptiveMaxPool1dOptions = AdaptiveMaxPoolOptions<ExpandingArray<1>>;

/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool2d` module.
///
/// Example:
/// ```
/// AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2}));
/// ```
using AdaptiveMaxPool2dOptions =
    AdaptiveMaxPoolOptions<ExpandingArrayWithOptionalElem<2>>;

/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool3d` module.
///
/// Example:
/// ```
/// AdaptiveMaxPool3d model(AdaptiveMaxPool3dOptions(3));
/// ```
using AdaptiveMaxPool3dOptions =
    AdaptiveMaxPoolOptions<ExpandingArrayWithOptionalElem<3>>;

namespace functional {
/// Options for `torch::nn::functional::adaptive_max_pool1d` and
/// `torch::nn::functional::adaptive_max_pool1d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
/// ```
using AdaptiveMaxPool1dFuncOptions = AdaptiveMaxPool1dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::adaptive_max_pool2d` and
/// `torch::nn::functional::adaptive_max_pool2d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
/// ```
using AdaptiveMaxPool2dFuncOptions = AdaptiveMaxPool2dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::adaptive_max_pool3d` and
/// `torch::nn::functional::adaptive_max_pool3d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
/// ```
using AdaptiveMaxPool3dFuncOptions = AdaptiveMaxPool3dOptions;
} // namespace functional

// ============================================================================

/// Options for a `D`-dimensional adaptive avgpool module.
template <typename output_size_t>
struct AdaptiveAvgPoolOptions {
  AdaptiveAvgPoolOptions(output_size_t output_size)
      : output_size_(output_size) {}

  /// the target output size
  TORCH_ARG(output_size_t, output_size);
};

/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool1d` module.
///
/// Example:
/// ```
/// AdaptiveAvgPool1d model(AdaptiveAvgPool1dOptions(5));
/// ```
using AdaptiveAvgPool1dOptions = AdaptiveAvgPoolOptions<ExpandingArray<1>>;

/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool2d` module.
///
/// Example:
/// ```
/// AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2}));
/// ```
using AdaptiveAvgPool2dOptions =
    AdaptiveAvgPoolOptions<ExpandingArrayWithOptionalElem<2>>;

/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool3d` module.
///
/// Example:
/// ```
/// AdaptiveAvgPool3d model(AdaptiveAvgPool3dOptions(3));
/// ```
using AdaptiveAvgPool3dOptions =
    AdaptiveAvgPoolOptions<ExpandingArrayWithOptionalElem<3>>;

namespace functional {
/// Options for `torch::nn::functional::adaptive_avg_pool1d`.
///
/// See the documentation for `torch::nn::AdaptiveAvgPool1dOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
/// ```
using AdaptiveAvgPool1dFuncOptions = AdaptiveAvgPool1dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::adaptive_avg_pool2d`.
///
/// See the documentation for `torch::nn::AdaptiveAvgPool2dOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
/// ```
using AdaptiveAvgPool2dFuncOptions = AdaptiveAvgPool2dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::adaptive_avg_pool3d`.
///
/// See the documentation for `torch::nn::AdaptiveAvgPool3dOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
/// ```
using AdaptiveAvgPool3dFuncOptions = AdaptiveAvgPool3dOptions;
} // namespace functional

// ============================================================================

/// Options for a `D`-dimensional maxunpool module.
template <size_t D>
struct MaxUnpoolOptions {
  MaxUnpoolOptions(ExpandingArray<D> kernel_size)
      : kernel_size_(kernel_size), stride_(kernel_size) {}

  /// the size of the window to take a max over
  TORCH_ARG(ExpandingArray<D>, kernel_size);

  /// the stride of the window. Default value is `kernel_size
  TORCH_ARG(ExpandingArray<D>, stride);

  /// implicit zero padding to be added on both sides
  TORCH_ARG(ExpandingArray<D>, padding) = 0;
};

/// `MaxUnpoolOptions` specialized for the `MaxUnpool1d` module.
///
/// Example:
/// ```
/// MaxUnpool1d model(MaxUnpool1dOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool1dOptions = MaxUnpoolOptions<1>;

/// `MaxUnpoolOptions` specialized for the `MaxUnpool2d` module.
///
/// Example:
/// ```
/// MaxUnpool2d model(MaxUnpool2dOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool2dOptions = MaxUnpoolOptions<2>;

/// `MaxUnpoolOptions` specialized for the `MaxUnpool3d` module.
///
/// Example:
/// ```
/// MaxUnpool3d model(MaxUnpool3dOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool3dOptions = MaxUnpoolOptions<3>;

// ============================================================================

namespace functional {

/// Options for a `D`-dimensional maxunpool functional.
template <size_t D>
struct MaxUnpoolFuncOptions {
  MaxUnpoolFuncOptions(ExpandingArray<D> kernel_size)
      : kernel_size_(kernel_size), stride_(kernel_size) {}

  /// the size of the window to take a max over
  TORCH_ARG(ExpandingArray<D>, kernel_size);

  /// the stride of the window. Default value is `kernel_size
  TORCH_ARG(ExpandingArray<D>, stride);

  /// implicit zero padding to be added on both sides
  TORCH_ARG(ExpandingArray<D>, padding) = 0;

  /// the targeted output size
  TORCH_ARG(c10::optional<std::vector<int64_t>>, output_size) = c10::nullopt;
};

/// `MaxUnpoolFuncOptions` specialized for
/// `torch::nn::functional::max_unpool1d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_unpool1d(x, indices,
/// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool1dFuncOptions = MaxUnpoolFuncOptions<1>;

/// `MaxUnpoolFuncOptions` specialized for
/// `torch::nn::functional::max_unpool2d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_unpool2d(x, indices,
/// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool2dFuncOptions = MaxUnpoolFuncOptions<2>;

/// `MaxUnpoolFuncOptions` specialized for
/// `torch::nn::functional::max_unpool3d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
/// ```
using MaxUnpool3dFuncOptions = MaxUnpoolFuncOptions<3>;

} // namespace functional

// ============================================================================

/// Options for a `D`-dimensional fractional maxpool module.
template <size_t D>
struct FractionalMaxPoolOptions {
  FractionalMaxPoolOptions(ExpandingArray<D> kernel_size)
      : kernel_size_(kernel_size) {}

  /// the size of the window to take a max over
  TORCH_ARG(ExpandingArray<D>, kernel_size);

  /// the target output size of the image
  TORCH_ARG(c10::optional<ExpandingArray<D>>, output_size) = c10::nullopt;

  /// If one wants to have an output size as a ratio of the input size, this
  /// option can be given. This has to be a number or tuple in the range (0, 1)
  using ExpandingArrayDouble = torch::ExpandingArray<D, double>;
  TORCH_ARG(c10::optional<ExpandingArrayDouble>, output_ratio) = c10::nullopt;

  TORCH_ARG(torch::Tensor, _random_samples) = Tensor();
};

/// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool2d` module.
///
/// Example:
/// ```
/// FractionalMaxPool2d model(FractionalMaxPool2dOptions(5).output_size(1));
/// ```
using FractionalMaxPool2dOptions = FractionalMaxPoolOptions<2>;

/// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool3d` module.
///
/// Example:
/// ```
/// FractionalMaxPool3d model(FractionalMaxPool3dOptions(5).output_size(1));
/// ```
using FractionalMaxPool3dOptions = FractionalMaxPoolOptions<3>;

namespace functional {
/// Options for `torch::nn::functional::fractional_max_pool2d` and
/// `torch::nn::functional::fractional_max_pool2d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fractional_max_pool2d(x,
/// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
/// ```
using FractionalMaxPool2dFuncOptions = FractionalMaxPool2dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::fractional_max_pool3d` and
/// `torch::nn::functional::fractional_max_pool3d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fractional_max_pool3d(x,
/// F::FractionalMaxPool3dFuncOptions(3).output_size(2));
/// ```
using FractionalMaxPool3dFuncOptions = FractionalMaxPool3dOptions;
} // namespace functional

// ============================================================================

/// Options for a `D`-dimensional lppool module.
template <size_t D>
struct LPPoolOptions {
  LPPoolOptions(double norm_type, ExpandingArray<D> kernel_size)
      : norm_type_(norm_type),
        kernel_size_(kernel_size),
        stride_(kernel_size) {}

  TORCH_ARG(double, norm_type);

  // the size of the window to take an average over
  TORCH_ARG(ExpandingArray<D>, kernel_size);

  // the stride of the window. Default value is `kernel_size`
  TORCH_ARG(ExpandingArray<D>, stride);

  // when True, will use `ceil` instead of `floor` to compute the output shape
  TORCH_ARG(bool, ceil_mode) = false;
};

/// `LPPoolOptions` specialized for the `LPPool1d` module.
///
/// Example:
/// ```
/// LPPool1d model(LPPool1dOptions(1, 2).stride(5).ceil_mode(true));
/// ```
using LPPool1dOptions = LPPoolOptions<1>;

/// `LPPoolOptions` specialized for the `LPPool2d` module.
///
/// Example:
/// ```
/// LPPool2d model(LPPool2dOptions(1, std::vector<int64_t>({3, 4})).stride({5,
/// 6}).ceil_mode(true));
/// ```
using LPPool2dOptions = LPPoolOptions<2>;

namespace functional {
/// Options for `torch::nn::functional::lp_pool1d`.
///
/// See the documentation for `torch::nn::LPPool1dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
/// ```
using LPPool1dFuncOptions = LPPool1dOptions;
} // namespace functional

namespace functional {
/// Options for `torch::nn::functional::lp_pool2d`.
///
/// See the documentation for `torch::nn::LPPool2dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
/// ```
using LPPool2dFuncOptions = LPPool2dOptions;
} // namespace functional

} // namespace nn
} // namespace torch
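For orientation, a small sketch (not part of the uploaded header) combining the module-style and functional-style pooling options above, assuming libtorch; input sizes are illustrative.
```
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 3, 32, 32});
  // Module form: 3x3 max pooling; stride defaults to kernel_size, so set it
  // explicitly to 2 here.
  torch::nn::MaxPool2d pool(torch::nn::MaxPool2dOptions(3).stride(2));
  auto pooled = pool(x);
  // Functional form: adaptive average pooling to a fixed 7x7 spatial output.
  auto small = F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(7));
  std::cout << pooled.sizes() << " " << small.sizes() << '\n';
}
```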
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h
ADDED
@@ -0,0 +1,236 @@
#pragma once

#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>

namespace torch {
namespace nn {

namespace detail {

/// Common options for RNN, LSTM and GRU modules.
struct TORCH_API RNNOptionsBase {
  typedef std::variant<
      enumtype::kLSTM,
      enumtype::kGRU,
      enumtype::kRNN_TANH,
      enumtype::kRNN_RELU>
      rnn_options_base_mode_t;

  RNNOptionsBase(
      rnn_options_base_mode_t mode,
      int64_t input_size,
      int64_t hidden_size);

  TORCH_ARG(rnn_options_base_mode_t, mode);
  /// The number of features of a single sample in the input sequence `x`.
  TORCH_ARG(int64_t, input_size);
  /// The number of features in the hidden state `h`.
  TORCH_ARG(int64_t, hidden_size);
  /// The number of recurrent layers (cells) to use.
  TORCH_ARG(int64_t, num_layers) = 1;
  /// Whether a bias term should be added to all linear operations.
  TORCH_ARG(bool, bias) = true;
  /// If true, the input sequence should be provided as `(batch, sequence,
  /// features)`. If false (default), the expected layout is `(sequence, batch,
  /// features)`.
  TORCH_ARG(bool, batch_first) = false;
  /// If non-zero, adds dropout with the given probability to the output of each
  /// RNN layer, except the final layer.
  TORCH_ARG(double, dropout) = 0.0;
  /// Whether to make the RNN bidirectional.
  TORCH_ARG(bool, bidirectional) = false;
  /// Cell projection dimension. If 0, projections are not added. Can only be
  /// used for LSTMs.
  TORCH_ARG(int64_t, proj_size) = 0;
};

} // namespace detail

/// Options for the `RNN` module.
///
/// Example:
/// ```
/// RNN model(RNNOptions(128,
/// 64).num_layers(3).dropout(0.2).nonlinearity(torch::kTanh));
/// ```
struct TORCH_API RNNOptions {
  typedef std::variant<enumtype::kTanh, enumtype::kReLU> nonlinearity_t;

  RNNOptions(int64_t input_size, int64_t hidden_size);

  /// The number of expected features in the input `x`
  TORCH_ARG(int64_t, input_size);
  /// The number of features in the hidden state `h`
  TORCH_ARG(int64_t, hidden_size);
  /// Number of recurrent layers. E.g., setting ``num_layers=2``
  /// would mean stacking two RNNs together to form a `stacked RNN`,
  /// with the second RNN taking in outputs of the first RNN and
  /// computing the final results. Default: 1
  TORCH_ARG(int64_t, num_layers) = 1;
  /// The non-linearity to use. Can be either ``torch::kTanh`` or
  /// ``torch::kReLU``. Default: ``torch::kTanh``
  TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh;
  /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
  /// Default: ``true``
  TORCH_ARG(bool, bias) = true;
  /// If ``true``, then the input and output tensors are provided
  /// as `(batch, seq, feature)`. Default: ``false``
  TORCH_ARG(bool, batch_first) = false;
  /// If non-zero, introduces a `Dropout` layer on the outputs of each
  /// RNN layer except the last layer, with dropout probability equal to
  /// `dropout`. Default: 0
  TORCH_ARG(double, dropout) = 0.0;
  /// If ``true``, becomes a bidirectional RNN. Default: ``false``
  TORCH_ARG(bool, bidirectional) = false;
};

/// Options for the `LSTM` module.
///
/// Example:
/// ```
/// LSTM model(LSTMOptions(2,
/// 4).num_layers(3).batch_first(false).bidirectional(true));
/// ```
struct TORCH_API LSTMOptions {
  LSTMOptions(int64_t input_size, int64_t hidden_size);

  /// The number of expected features in the input `x`
  TORCH_ARG(int64_t, input_size);
  /// The number of features in the hidden state `h`
  TORCH_ARG(int64_t, hidden_size);
  /// Number of recurrent layers. E.g., setting ``num_layers=2``
  /// would mean stacking two LSTMs together to form a `stacked LSTM`,
  /// with the second LSTM taking in outputs of the first LSTM and
  /// computing the final results. Default: 1
  TORCH_ARG(int64_t, num_layers) = 1;
  /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
  /// Default: ``true``
  TORCH_ARG(bool, bias) = true;
  /// If ``true``, then the input and output tensors are provided
  /// as (batch, seq, feature). Default: ``false``
  TORCH_ARG(bool, batch_first) = false;
  /// If non-zero, introduces a `Dropout` layer on the outputs of each
  /// LSTM layer except the last layer, with dropout probability equal to
  /// `dropout`. Default: 0
  TORCH_ARG(double, dropout) = 0.0;
  /// If ``true``, becomes a bidirectional LSTM. Default: ``false``
  TORCH_ARG(bool, bidirectional) = false;
  /// Cell projection dimension. If 0, projections are not added
  TORCH_ARG(int64_t, proj_size) = 0;
};

/// Options for the `GRU` module.
///
/// Example:
/// ```
/// GRU model(GRUOptions(2,
/// 4).num_layers(3).batch_first(false).bidirectional(true));
/// ```
struct TORCH_API GRUOptions {
  GRUOptions(int64_t input_size, int64_t hidden_size);

  /// The number of expected features in the input `x`
  TORCH_ARG(int64_t, input_size);
  /// The number of features in the hidden state `h`
  TORCH_ARG(int64_t, hidden_size);
  /// Number of recurrent layers. E.g., setting ``num_layers=2``
  /// would mean stacking two GRUs together to form a `stacked GRU`,
  /// with the second GRU taking in outputs of the first GRU and
  /// computing the final results. Default: 1
  TORCH_ARG(int64_t, num_layers) = 1;
  /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
  /// Default: ``true``
  TORCH_ARG(bool, bias) = true;
  /// If ``true``, then the input and output tensors are provided
  /// as (batch, seq, feature). Default: ``false``
  TORCH_ARG(bool, batch_first) = false;
  /// If non-zero, introduces a `Dropout` layer on the outputs of each
  /// GRU layer except the last layer, with dropout probability equal to
  /// `dropout`. Default: 0
  TORCH_ARG(double, dropout) = 0.0;
  /// If ``true``, becomes a bidirectional GRU. Default: ``false``
  TORCH_ARG(bool, bidirectional) = false;
};

namespace detail {

/// Common options for RNNCell, LSTMCell and GRUCell modules
struct TORCH_API RNNCellOptionsBase {
  RNNCellOptionsBase(
      int64_t input_size,
      int64_t hidden_size,
      bool bias,
      int64_t num_chunks);
  TORCH_ARG(int64_t, input_size);
  TORCH_ARG(int64_t, hidden_size);
  TORCH_ARG(bool, bias);
  TORCH_ARG(int64_t, num_chunks);
};

} // namespace detail

/// Options for the `RNNCell` module.
///
/// Example:
/// ```
/// RNNCell model(RNNCellOptions(20,
/// 10).bias(false).nonlinearity(torch::kReLU));
/// ```
struct TORCH_API RNNCellOptions {
  typedef std::variant<enumtype::kTanh, enumtype::kReLU> nonlinearity_t;

  RNNCellOptions(int64_t input_size, int64_t hidden_size);

  /// The number of expected features in the input `x`
  TORCH_ARG(int64_t, input_size);
  /// The number of features in the hidden state `h`
  TORCH_ARG(int64_t, hidden_size);
  /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
  /// Default: ``true``
  TORCH_ARG(bool, bias) = true;
  /// The non-linearity to use. Can be either ``torch::kTanh`` or
  /// ``torch::kReLU``. Default: ``torch::kTanh``
  TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh;
};

/// Options for the `LSTMCell` module.
///
/// Example:
/// ```
/// LSTMCell model(LSTMCellOptions(20, 10).bias(false));
/// ```
struct TORCH_API LSTMCellOptions {
  LSTMCellOptions(int64_t input_size, int64_t hidden_size);

  /// The number of expected features in the input `x`
  TORCH_ARG(int64_t, input_size);
  /// The number of features in the hidden state `h`
  TORCH_ARG(int64_t, hidden_size);
  /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
  /// Default: ``true``
  TORCH_ARG(bool, bias) = true;
};

/// Options for the `GRUCell` module.
///
/// Example:
/// ```
/// GRUCell model(GRUCellOptions(20, 10).bias(false));
/// ```
struct TORCH_API GRUCellOptions {
  GRUCellOptions(int64_t input_size, int64_t hidden_size);

  /// The number of expected features in the input `x`
  TORCH_ARG(int64_t, input_size);
  /// The number of features in the hidden state `h`
  TORCH_ARG(int64_t, hidden_size);
  /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
  /// Default: ``true``
  TORCH_ARG(bool, bias) = true;
};

} // namespace nn
} // namespace torch
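A brief LSTM sketch (not part of the uploaded header) showing the options declared above in use, assuming libtorch; the sequence length, batch size and feature width are arbitrary.
```
#include <torch/torch.h>
#include <iostream>

int main() {
  // A 2-layer bidirectional LSTM over sequences of 10-dim features.
  torch::nn::LSTM lstm(torch::nn::LSTMOptions(10, 20)
                           .num_layers(2)
                           .dropout(0.1)
                           .bidirectional(true));
  // Input layout is (seq, batch, feature) since batch_first defaults to false.
  auto input = torch::randn({5, 3, 10});
  auto [output, state] = lstm->forward(input);
  std::cout << output.sizes() << '\n';  // (5, 3, 40): 2 directions * hidden 20
}
```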
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h
ADDED
@@ -0,0 +1,64 @@
#pragma once

#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>

#include <torch/nn/modules/container/any.h>
#include <torch/nn/options/transformerlayer.h>

namespace torch {
namespace nn {

/// Options for the `Transformer` module
///
/// Example:
/// ```
/// TransformerOptions options;
/// TransformerOptions options(16, 4);
/// auto options = TransformerOptions().d_model(4).nhead(2).dropout(0.0);
/// ```
struct TORCH_API TransformerOptions {
  // The following constructors are commonly used
  // Please don't add more unless it is proved as a common usage
  TransformerOptions() = default;
  TransformerOptions(int64_t d_model, int64_t nhead);
  TransformerOptions(
      int64_t d_model,
      int64_t nhead,
      int64_t num_encoder_layers,
      int64_t num_decoder_layers);

  /// the number of expected features in the encoder/decoder inputs
  /// (default=512)
  TORCH_ARG(int64_t, d_model) = 512;

  /// the number of heads in the multiheadattention models (default=8)
  TORCH_ARG(int64_t, nhead) = 8;

  /// the number of sub-encoder-layers in the encoder (default=6)
  TORCH_ARG(int64_t, num_encoder_layers) = 6;

  /// the number of sub-decoder-layers in the decoder (default=6)
  TORCH_ARG(int64_t, num_decoder_layers) = 6;

  /// the dimension of the feedforward network model (default=2048)
  TORCH_ARG(int64_t, dim_feedforward) = 2048;

  /// the dropout value (default=0.1)
  TORCH_ARG(double, dropout) = 0.1;

  /// the activation function of encoder/decoder intermediate layer
  /// (default=``torch::kReLU``)
  TORCH_ARG(activation_t, activation) = torch::kReLU;

  /// custom encoder (default=None)
  TORCH_ARG(AnyModule, custom_encoder);

  /// custom decoder (default=None)
  TORCH_ARG(AnyModule, custom_decoder);
};

} // namespace nn
} // namespace torch
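Finally, a small sketch (not part of the uploaded header) of constructing a Transformer from these options, assuming libtorch; the model width, head count, layer counts and sequence shapes are illustrative only.
```
#include <torch/torch.h>
#include <iostream>

int main() {
  // A small Transformer: d_model=32, 4 heads, 2 encoder and 2 decoder layers.
  torch::nn::Transformer transformer(
      torch::nn::TransformerOptions(32, 4, 2, 2).dropout(0.0));
  auto src = torch::randn({10, 2, 32});  // (source seq, batch, d_model)
  auto tgt = torch::randn({7, 2, 32});   // (target seq, batch, d_model)
  auto out = transformer->forward(src, tgt);
  std::cout << out.sizes() << '\n';      // (7, 2, 32)
}
```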