diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/all.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/all.h new file mode 100644 index 0000000000000000000000000000000000000000..0d2eca31889c615c524f6689aab2898943b56521 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/all.h @@ -0,0 +1,23 @@ +#pragma once + +#if !defined(_MSC_VER) && __cplusplus < 201703L +#error C++17 or later compatible compiler is required to use PyTorch. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/arg.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/arg.h new file mode 100644 index 0000000000000000000000000000000000000000..a09362ee582dcc1a0c7cc579f40c1d424bf1c6b3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/arg.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +#define TORCH_ARG(T, name) \ + public: \ + inline auto name(const T& new_##name)->decltype(*this) { /* NOLINT */ \ + this->name##_ = new_##name; \ + return *this; \ + } \ + inline auto name(T&& new_##name)->decltype(*this) { /* NOLINT */ \ + this->name##_ = std::move(new_##name); \ + return *this; \ + } \ + inline const T& name() const noexcept { /* NOLINT */ \ + return this->name##_; \ + } \ + inline T& name() noexcept { /* NOLINT */ \ + return this->name##_; \ + } \ + \ + private: \ + T name##_ /* NOLINT */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h new file mode 100644 index 
0000000000000000000000000000000000000000..cf0608fa01bbf5549d81c6edc1b3e4cd82de379b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h @@ -0,0 +1,5 @@ +#pragma once + +#include +#include +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/cuda.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..537ddf02479c26000bac02f6d3521d0ccbd90a80 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/cuda.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +#include +#include + +namespace torch { +namespace cuda { + +/// Returns the number of CUDA devices available. +size_t TORCH_API device_count(); + +/// Returns true if at least one CUDA device is available. +bool TORCH_API is_available(); + +/// Returns true if CUDA is available, and CuDNN is available. +bool TORCH_API cudnn_is_available(); + +/// Sets the seed for the current GPU. +void TORCH_API manual_seed(uint64_t seed); + +/// Sets the seed for all available GPUs. +void TORCH_API manual_seed_all(uint64_t seed); + +/// Waits for all kernels in all streams on a CUDA device to complete. +void TORCH_API synchronize(int64_t device_index = -1); + +} // namespace cuda +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h new file mode 100644 index 0000000000000000000000000000000000000000..ac718acd4fa3188c56c6896126b070ea9db7a174 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include +#include +#include + +// Some "exports". 
+namespace torch { +namespace data { +using datasets::BatchDataset; +using datasets::Dataset; +} // namespace data +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h new file mode 100644 index 0000000000000000000000000000000000000000..debfc6c785856059bf07eccec212ef97833a555f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h @@ -0,0 +1,212 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#define TORCH_ENUM_DECLARE(name) \ + namespace torch { \ + namespace enumtype { \ + /* \ + NOTE: We need to provide the default constructor for each struct, \ + otherwise Clang 3.8 would complain: \ + ``` \ + error: default initialization of an object of const type 'const \ + enumtype::Enum1' without a user-provided default constructor \ + ``` \ + */ \ + struct k##name { \ + k##name() {} \ + }; \ + } \ + TORCH_API extern const enumtype::k##name k##name; \ + } + +#define TORCH_ENUM_DEFINE(name) \ + namespace torch { \ + const enumtype::k##name k##name; \ + } + +#define TORCH_ENUM_PRETTY_PRINT(name) \ + std::string operator()(const enumtype::k##name& v) const { \ + std::string k("k"); \ + return k + #name; \ + } + +// NOTE: Backstory on why we need the following two macros: +// +// Consider the following options class: +// +// ``` +// struct TORCH_API SomeOptions { +// typedef std::variant +// reduction_t; SomeOptions(reduction_t reduction = torch::kMean) : +// reduction_(reduction) {} +// +// TORCH_ARG(reduction_t, reduction); +// }; +// ``` +// +// and the functional that uses it: +// +// ``` +// Tensor some_functional( +// const Tensor& input, +// SomeOptions options = {}) { +// ... 
+// } +// ``` +// +// Normally, we would expect this to work: +// +// `F::some_functional(input, torch::kNone)` +// +// However, it throws the following error instead: +// +// ``` +// error: could not convert `torch::kNone` from `const torch::enumtype::kNone` +// to `torch::nn::SomeOptions` +// ``` +// +// To get around this problem, we explicitly provide the following constructors +// for `SomeOptions`: +// +// ``` +// SomeOptions(torch::enumtype::kNone reduction) : reduction_(torch::kNone) {} +// SomeOptions(torch::enumtype::kMean reduction) : reduction_(torch::kMean) {} +// SomeOptions(torch::enumtype::kSum reduction) : reduction_(torch::kSum) {} +// ``` +// +// so that the conversion from `torch::kNone` to `SomeOptions` would work. +// +// Note that we also provide the default constructor `SomeOptions() {}`, so that +// `SomeOptions options = {}` can work. +#define TORCH_OPTIONS_CTOR_VARIANT_ARG3( \ + OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3) \ + OPTIONS_NAME() = default; \ + OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \ + OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \ + OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} + +#define TORCH_OPTIONS_CTOR_VARIANT_ARG4( \ + OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3, TYPE4) \ + OPTIONS_NAME() = default; \ + OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \ + OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \ + OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} \ + OPTIONS_NAME(torch::enumtype::TYPE4 ARG_NAME) : ARG_NAME##_(torch::TYPE4) {} + +TORCH_ENUM_DECLARE(Linear) +TORCH_ENUM_DECLARE(Conv1D) +TORCH_ENUM_DECLARE(Conv2D) +TORCH_ENUM_DECLARE(Conv3D) +TORCH_ENUM_DECLARE(ConvTranspose1D) +TORCH_ENUM_DECLARE(ConvTranspose2D) +TORCH_ENUM_DECLARE(ConvTranspose3D) +TORCH_ENUM_DECLARE(Sigmoid) +TORCH_ENUM_DECLARE(Tanh) +TORCH_ENUM_DECLARE(ReLU) 
+TORCH_ENUM_DECLARE(GELU) +TORCH_ENUM_DECLARE(SiLU) +TORCH_ENUM_DECLARE(Mish) +TORCH_ENUM_DECLARE(LeakyReLU) +TORCH_ENUM_DECLARE(FanIn) +TORCH_ENUM_DECLARE(FanOut) +TORCH_ENUM_DECLARE(Constant) +TORCH_ENUM_DECLARE(Reflect) +TORCH_ENUM_DECLARE(Replicate) +TORCH_ENUM_DECLARE(Circular) +TORCH_ENUM_DECLARE(Nearest) +TORCH_ENUM_DECLARE(Bilinear) +TORCH_ENUM_DECLARE(Bicubic) +TORCH_ENUM_DECLARE(Trilinear) +TORCH_ENUM_DECLARE(Area) +TORCH_ENUM_DECLARE(NearestExact) +TORCH_ENUM_DECLARE(Sum) +TORCH_ENUM_DECLARE(Mean) +TORCH_ENUM_DECLARE(Max) +TORCH_ENUM_DECLARE(None) +TORCH_ENUM_DECLARE(BatchMean) +TORCH_ENUM_DECLARE(Zeros) +TORCH_ENUM_DECLARE(Border) +TORCH_ENUM_DECLARE(Reflection) +TORCH_ENUM_DECLARE(RNN_TANH) +TORCH_ENUM_DECLARE(RNN_RELU) +TORCH_ENUM_DECLARE(LSTM) +TORCH_ENUM_DECLARE(GRU) +TORCH_ENUM_DECLARE(Valid) +TORCH_ENUM_DECLARE(Same) + +namespace torch { +namespace enumtype { + +struct _compute_enum_name { + TORCH_ENUM_PRETTY_PRINT(Linear) + TORCH_ENUM_PRETTY_PRINT(Conv1D) + TORCH_ENUM_PRETTY_PRINT(Conv2D) + TORCH_ENUM_PRETTY_PRINT(Conv3D) + TORCH_ENUM_PRETTY_PRINT(ConvTranspose1D) + TORCH_ENUM_PRETTY_PRINT(ConvTranspose2D) + TORCH_ENUM_PRETTY_PRINT(ConvTranspose3D) + TORCH_ENUM_PRETTY_PRINT(Sigmoid) + TORCH_ENUM_PRETTY_PRINT(Tanh) + TORCH_ENUM_PRETTY_PRINT(ReLU) + TORCH_ENUM_PRETTY_PRINT(GELU) + TORCH_ENUM_PRETTY_PRINT(SiLU) + TORCH_ENUM_PRETTY_PRINT(Mish) + TORCH_ENUM_PRETTY_PRINT(LeakyReLU) + TORCH_ENUM_PRETTY_PRINT(FanIn) + TORCH_ENUM_PRETTY_PRINT(FanOut) + TORCH_ENUM_PRETTY_PRINT(Constant) + TORCH_ENUM_PRETTY_PRINT(Reflect) + TORCH_ENUM_PRETTY_PRINT(Replicate) + TORCH_ENUM_PRETTY_PRINT(Circular) + TORCH_ENUM_PRETTY_PRINT(Nearest) + TORCH_ENUM_PRETTY_PRINT(Bilinear) + TORCH_ENUM_PRETTY_PRINT(Bicubic) + TORCH_ENUM_PRETTY_PRINT(Trilinear) + TORCH_ENUM_PRETTY_PRINT(Area) + TORCH_ENUM_PRETTY_PRINT(NearestExact) + TORCH_ENUM_PRETTY_PRINT(Sum) + TORCH_ENUM_PRETTY_PRINT(Mean) + TORCH_ENUM_PRETTY_PRINT(Max) + TORCH_ENUM_PRETTY_PRINT(None) + 
TORCH_ENUM_PRETTY_PRINT(BatchMean) + TORCH_ENUM_PRETTY_PRINT(Zeros) + TORCH_ENUM_PRETTY_PRINT(Border) + TORCH_ENUM_PRETTY_PRINT(Reflection) + TORCH_ENUM_PRETTY_PRINT(RNN_TANH) + TORCH_ENUM_PRETTY_PRINT(RNN_RELU) + TORCH_ENUM_PRETTY_PRINT(LSTM) + TORCH_ENUM_PRETTY_PRINT(GRU) + TORCH_ENUM_PRETTY_PRINT(Valid) + TORCH_ENUM_PRETTY_PRINT(Same) +}; + +template +std::string get_enum_name(V variant_enum) { + return std::visit(enumtype::_compute_enum_name{}, variant_enum); +} + +template +at::Reduction::Reduction reduction_get_enum(V variant_enum) { + if (std::holds_alternative(variant_enum)) { + return at::Reduction::None; + } else if (std::holds_alternative(variant_enum)) { + return at::Reduction::Mean; + } else if (std::holds_alternative(variant_enum)) { + return at::Reduction::Sum; + } else { + TORCH_CHECK( + false, + get_enum_name(variant_enum), + " is not a valid value for reduction"); + return at::Reduction::END; + } +} + +} // namespace enumtype +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h new file mode 100644 index 0000000000000000000000000000000000000000..aa4fecf4ff37c35c62d93563c440d10786368abb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h @@ -0,0 +1,182 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { + +/// A utility class that accepts either a container of `D`-many values, or a +/// single value, which is internally repeated `D` times. This is useful to +/// represent parameters that are multidimensional, but often equally sized in +/// all dimensions. For example, the kernel size of a 2D convolution has an `x` +/// and `y` length, but `x` and `y` are often equal. 
In such a case you could +/// just pass `3` to an `ExpandingArray<2>` and it would "expand" to `{3, 3}`. +template +class ExpandingArray { + public: + /// Constructs an `ExpandingArray` from an `initializer_list`. The extent of + /// the length is checked against the `ExpandingArray`'s extent parameter `D` + /// at runtime. + /*implicit*/ ExpandingArray(std::initializer_list list) + : ExpandingArray(at::ArrayRef(list)) {} + + /// Constructs an `ExpandingArray` from an `std::vector`. The extent of + /// the length is checked against the `ExpandingArray`'s extent parameter `D` + /// at runtime. + /*implicit*/ ExpandingArray(std::vector vec) + : ExpandingArray(at::ArrayRef(vec)) {} + + /// Constructs an `ExpandingArray` from an `at::ArrayRef`. The extent of + /// the length is checked against the `ExpandingArray`'s extent parameter `D` + /// at runtime. + /*implicit*/ ExpandingArray(at::ArrayRef values) { + // clang-format off + TORCH_CHECK( + values.size() == D, + "Expected ", D, " values, but instead got ", values.size()); + // clang-format on + std::copy(values.begin(), values.end(), values_.begin()); + } + + /// Constructs an `ExpandingArray` from a single value, which is repeated `D` + /// times (where `D` is the extent parameter of the `ExpandingArray`). + /*implicit*/ ExpandingArray(T single_size) { + values_.fill(single_size); + } + + /// Constructs an `ExpandingArray` from a correctly sized `std::array`. + /*implicit*/ ExpandingArray(const std::array& values) + : values_(values) {} + + /// Accesses the underlying `std::array`. + std::array& operator*() { + return values_; + } + + /// Accesses the underlying `std::array`. + const std::array& operator*() const { + return values_; + } + + /// Accesses the underlying `std::array`. + std::array* operator->() { + return &values_; + } + + /// Accesses the underlying `std::array`. + const std::array* operator->() const { + return &values_; + } + + /// Returns an `ArrayRef` to the underlying `std::array`. 
+ operator at::ArrayRef() const { + return values_; + } + + /// Returns the extent of the `ExpandingArray`. + size_t size() const noexcept { + return D; + } + + protected: + /// The backing array. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::array values_; +}; + +template +std::ostream& operator<<( + std::ostream& stream, + const ExpandingArray& expanding_array) { + if (expanding_array.size() == 1) { + return stream << expanding_array->at(0); + } + return stream << static_cast>(expanding_array); +} + +/// A utility class that accepts either a container of `D`-many +/// `c10::optional` values, or a single `c10::optional` value, which is +/// internally repeated `D` times. It has the additional ability to accept +/// containers of the underlying type `T` and convert them to a container of +/// `c10::optional`. +template +class ExpandingArrayWithOptionalElem + : public ExpandingArray> { + public: + using ExpandingArray>::ExpandingArray; + + /// Constructs an `ExpandingArrayWithOptionalElem` from an `initializer_list` + /// of the underlying type `T`. The extent of the length is checked against + /// the `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. + /*implicit*/ ExpandingArrayWithOptionalElem(std::initializer_list list) + : ExpandingArrayWithOptionalElem(at::ArrayRef(list)) {} + + /// Constructs an `ExpandingArrayWithOptionalElem` from an `std::vector` of + /// the underlying type `T`. The extent of the length is checked against the + /// `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. + /*implicit*/ ExpandingArrayWithOptionalElem(std::vector vec) + : ExpandingArrayWithOptionalElem(at::ArrayRef(vec)) {} + + /// Constructs an `ExpandingArrayWithOptionalElem` from an `at::ArrayRef` of + /// the underlying type `T`. The extent of the length is checked against the + /// `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. 
+ /*implicit*/ ExpandingArrayWithOptionalElem(at::ArrayRef values) + : ExpandingArray>(0) { + // clang-format off + TORCH_CHECK( + values.size() == D, + "Expected ", D, " values, but instead got ", values.size()); + // clang-format on + for (const auto i : c10::irange(this->values_.size())) { + this->values_[i] = values[i]; + } + } + + /// Constructs an `ExpandingArrayWithOptionalElem` from a single value of the + /// underlying type `T`, which is repeated `D` times (where `D` is the extent + /// parameter of the `ExpandingArrayWithOptionalElem`). + /*implicit*/ ExpandingArrayWithOptionalElem(T single_size) + : ExpandingArray>(0) { + for (const auto i : c10::irange(this->values_.size())) { + this->values_[i] = single_size; + } + } + + /// Constructs an `ExpandingArrayWithOptionalElem` from a correctly sized + /// `std::array` of the underlying type `T`. + /*implicit*/ ExpandingArrayWithOptionalElem(const std::array& values) + : ExpandingArray>(0) { + for (const auto i : c10::irange(this->values_.size())) { + this->values_[i] = values[i]; + } + } +}; + +template +std::ostream& operator<<( + std::ostream& stream, + const ExpandingArrayWithOptionalElem& expanding_array_with_opt_elem) { + if (expanding_array_with_opt_elem.size() == 1) { + const auto& elem = expanding_array_with_opt_elem->at(0); + stream << (elem.has_value() ? c10::str(elem.value()) : "None"); + } else { + std::vector str_array; + for (const auto& elem : *expanding_array_with_opt_elem) { + str_array.emplace_back( + elem.has_value() ? 
c10::str(elem.value()) : "None"); + } + stream << at::ArrayRef(str_array); + } + return stream; +} + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h new file mode 100644 index 0000000000000000000000000000000000000000..86ab5050a5f7df459660512d4be5bd50a9bb68a1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h @@ -0,0 +1,389 @@ +#pragma once + +#include + +namespace torch { +namespace fft { + +/// Computes the 1 dimensional fast Fourier transform over a given dimension. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fft. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kComplexDouble); +/// torch::fft::fft(t); +/// ``` +inline Tensor fft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_fft_symint(self, n, dim, norm); +} + +/// Computes the 1 dimensional inverse Fourier transform over a given dimension. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifft. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kComplexDouble); +/// torch::fft::ifft(t); +/// ``` +inline Tensor ifft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_ifft_symint(self, n, dim, norm); +} + +/// Computes the 2-dimensional fast Fourier transform over the given dimensions. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fft2. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::fft2(t); +/// ``` +inline Tensor fft2( + const Tensor& self, + OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_fft2(self, s, dim, norm); +} + +/// Computes the inverse of torch.fft.fft2 +/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifft2. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::ifft2(t); +/// ``` +inline Tensor ifft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_ifft2(self, s, dim, norm); +} + +/// Computes the N dimensional fast Fourier transform over given dimensions. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fftn. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::fftn(t); +/// ``` +inline Tensor fftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef dim = c10::nullopt, + c10::optional norm = c10::nullopt) { + return torch::fft_fftn(self, s, dim, norm); +} + +/// Computes the N dimensional fast Fourier transform over given dimensions. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifftn. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::ifftn(t); +/// ``` +inline Tensor ifftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef dim = c10::nullopt, + c10::optional norm = c10::nullopt) { + return torch::fft_ifftn(self, s, dim, norm); +} + +/// Computes the 1 dimensional FFT of real input with onesided Hermitian output. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.rfft. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn(128); +/// auto T = torch::fft::rfft(t); +/// assert(T.is_complex() && T.numel() == 128 / 2 + 1); +/// ``` +inline Tensor rfft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_rfft_symint(self, n, dim, norm); +} + +/// Computes the inverse of torch.fft.rfft +/// +/// The input is a onesided Hermitian Fourier domain signal, with real-valued +/// output. See https://pytorch.org/docs/master/fft.html#torch.fft.irfft +/// +/// Example: +/// ``` +/// auto T = torch::randn(128 / 2 + 1, torch::kComplexDouble); +/// auto t = torch::fft::irfft(t, /*n=*/128); +/// assert(t.is_floating_point() && T.numel() == 128); +/// ``` +inline Tensor irfft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_irfft_symint(self, n, dim, norm); +} + +/// Computes the 2-dimensional FFT of real input. Returns a onesided Hermitian +/// output. See https://pytorch.org/docs/master/fft.html#torch.fft.rfft2 +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kDouble); +/// torch::fft::rfft2(t); +/// ``` +inline Tensor rfft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_rfft2(self, s, dim, norm); +} + +/// Computes the inverse of torch.fft.rfft2. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.irfft2. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::irfft2(t); +/// ``` +inline Tensor irfft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_irfft2(self, s, dim, norm); +} + +/// Computes the N dimensional FFT of real input with onesided Hermitian output. 
+/// See https://pytorch.org/docs/master/fft.html#torch.fft.rfftn +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kDouble); +/// torch::fft::rfftn(t); +/// ``` +inline Tensor rfftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef dim = c10::nullopt, + c10::optional norm = c10::nullopt) { + return torch::fft_rfftn(self, s, dim, norm); +} + +/// Computes the inverse of torch.fft.rfftn. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.irfftn. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::irfftn(t); +/// ``` +inline Tensor irfftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef dim = c10::nullopt, + c10::optional norm = c10::nullopt) { + return torch::fft_irfftn(self, s, dim, norm); +} + +/// Computes the 1 dimensional FFT of a onesided Hermitian signal +/// +/// The input represents a Hermitian symmetric time domain signal. The returned +/// Fourier domain representation of such a signal is a real-valued. See +/// https://pytorch.org/docs/master/fft.html#torch.fft.hfft +/// +/// Example: +/// ``` +/// auto t = torch::randn(128 / 2 + 1, torch::kComplexDouble); +/// auto T = torch::fft::hfft(t, /*n=*/128); +/// assert(T.is_floating_point() && T.numel() == 128); +/// ``` +inline Tensor hfft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_hfft_symint(self, n, dim, norm); +} + +/// Computes the inverse FFT of a real-valued Fourier domain signal. +/// +/// The output is a onesided representation of the Hermitian symmetric time +/// domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.ihfft. 
+/// +/// Example: +/// ``` +/// auto T = torch::randn(128, torch::kDouble); +/// auto t = torch::fft::ihfft(T); +/// assert(t.is_complex() && T.numel() == 128 / 2 + 1); +/// ``` +inline Tensor ihfft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_ihfft_symint(self, n, dim, norm); +} + +/// Computes the 2-dimensional FFT of a Hermitian symmetric input signal. +/// +/// The input is a onesided representation of the Hermitian symmetric time +/// domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.hfft2. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 65}, torch::kComplexDouble); +/// auto T = torch::fft::hfft2(t, /*s=*/{128, 128}); +/// assert(T.is_floating_point() && T.numel() == 128 * 128); +/// ``` +inline Tensor hfft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_hfft2(self, s, dim, norm); +} + +/// Computes the 2-dimensional IFFT of a real input signal. +/// +/// The output is a onesided representation of the Hermitian symmetric time +/// domain signal. See +/// https://pytorch.org/docs/master/fft.html#torch.fft.ihfft2. +/// +/// Example: +/// ``` +/// auto T = torch::randn({128, 128}, torch::kDouble); +/// auto t = torch::fft::hfft2(T); +/// assert(t.is_complex() && t.size(1) == 65); +/// ``` +inline Tensor ihfft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_ihfft2(self, s, dim, norm); +} + +/// Computes the N-dimensional FFT of a Hermitian symmetric input signal. +/// +/// The input is a onesided representation of the Hermitian symmetric time +/// domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.hfftn. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 65}, torch::kComplexDouble); +/// auto T = torch::fft::hfftn(t, /*s=*/{128, 128}); +/// assert(T.is_floating_point() && T.numel() == 128 * 128); +/// ``` +inline Tensor hfftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_hfftn(self, s, dim, norm); +} + +/// Computes the N-dimensional IFFT of a real input signal. +/// +/// The output is a onesided representation of the Hermitian symmetric time +/// domain signal. See +/// https://pytorch.org/docs/master/fft.html#torch.fft.ihfftn. +/// +/// Example: +/// ``` +/// auto T = torch::randn({128, 128}, torch::kDouble); +/// auto t = torch::fft::hfft2(T); +/// assert(t.is_complex() && t.size(1) == 65); +/// ``` +inline Tensor ihfftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_ihfftn(self, s, dim, norm); +} + +/// Computes the discrete Fourier Transform sample frequencies for a signal of +/// size n. +/// +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fftfreq +/// +/// Example: +/// ``` +/// auto frequencies = torch::fft::fftfreq(128, torch::kDouble); +/// ``` +inline Tensor fftfreq(int64_t n, double d, const TensorOptions& options = {}) { + return torch::fft_fftfreq(n, d, options); +} + +inline Tensor fftfreq(int64_t n, const TensorOptions& options = {}) { + return torch::fft_fftfreq(n, /*d=*/1.0, options); +} + +/// Computes the sample frequencies for torch.fft.rfft with a signal of size n. +/// +/// Like torch.fft.rfft, only the positive frequencies are included. 
+/// See https://pytorch.org/docs/master/fft.html#torch.fft.rfftfreq +/// +/// Example: +/// ``` +/// auto frequencies = torch::fft::rfftfreq(128, torch::kDouble); +/// ``` +inline Tensor rfftfreq(int64_t n, double d, const TensorOptions& options) { + return torch::fft_rfftfreq(n, d, options); +} + +inline Tensor rfftfreq(int64_t n, const TensorOptions& options) { + return torch::fft_rfftfreq(n, /*d=*/1.0, options); +} + +/// Reorders n-dimensional FFT output to have negative frequency terms first, by +/// a torch.roll operation. +/// +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fftshift +/// +/// Example: +/// ``` +/// auto x = torch::randn({127, 4}); +/// auto centred_fft = torch::fft::fftshift(torch::fft::fftn(x)); +/// ``` +inline Tensor fftshift( + const Tensor& x, + at::OptionalIntArrayRef dim = c10::nullopt) { + return torch::fft_fftshift(x, dim); +} + +/// Inverse of torch.fft.fftshift +/// +/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifftshift +/// +/// Example: +/// ``` +/// auto x = torch::randn({127, 4}); +/// auto shift = torch::fft::fftshift(x) +/// auto unshift = torch::fft::ifftshift(shift); +/// assert(torch::allclose(x, unshift)); +/// ``` +inline Tensor ifftshift( + const Tensor& x, + at::OptionalIntArrayRef dim = c10::nullopt) { + return torch::fft_ifftshift(x, dim); +} + +} // namespace fft +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/imethod.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/imethod.h new file mode 100644 index 0000000000000000000000000000000000000000..1d3bdd04449de6c9a38c415c92b52e3dbbb6881b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/imethod.h @@ -0,0 +1,53 @@ +#pragma once +#include +#include + +namespace torch { + +class TORCH_API IMethod { + /* + IMethod provides a portable interface for torch methods, whether + they are 
backed by torchscript or python/deploy. + + This is helpful since torchscript methods provide additional information + (e.g. FunctionSchema, Graph) which aren't available in pure python methods. + + Higher level APIs should prefer depending on this interface rather + than a specific implementation of it, to promote portability and reuse, and + avoid unintentional dependencies on e.g. script methods. + + Note: This API is experimental, and may evolve. + */ + public: + using IValueList = std::vector; + using IValueMap = std::unordered_map; + + IMethod() = default; + IMethod(const IMethod&) = default; + IMethod& operator=(const IMethod&) = default; + IMethod(IMethod&&) noexcept = default; + IMethod& operator=(IMethod&&) noexcept = default; + virtual ~IMethod() = default; + + virtual c10::IValue operator()( + std::vector args, + const IValueMap& kwargs = IValueMap()) const = 0; + + virtual const std::string& name() const = 0; + + // Returns an ordered list of argument names, possible in both + // script and python methods. This is a more portable dependency + // than a ScriptMethod FunctionSchema, which has more information + // than can be generally expected from a python method. 
+ const std::vector& getArgumentNames() const; + + protected: + virtual void setArgumentNames( + std::vector& argumentNames) const = 0; + + private: + mutable bool isArgumentNamesInitialized_{false}; + mutable std::vector argumentNames_; +}; + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/jit.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/jit.h new file mode 100644 index 0000000000000000000000000000000000000000..703eed0d04248044edfa136324be0eb4828f76a4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/jit.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +/// Compiles script code into an executable graph. +/// +/// Takes a string containing functions in script syntax and compiles them into +/// a module (graph). The returned module provides a `run_method` function +/// that may be used to invoke the compiled functions. +/// +/// For example: +/// \rst +/// .. 
code-block:: cpp +/// +/// auto module = torch::jit::compile(R"JIT( +/// def relu_script(a, b): +/// return torch.relu(a + b) +/// def test_while(a, i): +/// while i < 10: +/// a += a +/// i += 1 +/// return a +/// )JIT"); +/// IValue output = module->run_method("relu_script", a, b); +/// \endrst +TORCH_API std::shared_ptr compile(const std::string& source); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h new file mode 100644 index 0000000000000000000000000000000000000000..3dd59c9f12f87794faf5f03c1aaf95a516e2aa99 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h @@ -0,0 +1,1065 @@ +#pragma once + +#include + +namespace torch { +namespace linalg { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor cholesky(const Tensor& self) { + return torch::linalg_cholesky(self); +} + +inline Tensor cholesky_out(Tensor& result, const Tensor& self) { + return torch::linalg_cholesky_out(result, self); +} + +inline Tensor det(const Tensor& self) { + return torch::linalg_det(self); +} + +inline std::tuple slogdet(const Tensor& input) { + return torch::linalg_slogdet(input); +} + +inline std::tuple slogdet_out( + Tensor& sign, + Tensor& logabsdet, + const Tensor& input) { + return torch::linalg_slogdet_out(sign, logabsdet, input); +} + +inline std::tuple eig(const Tensor& self) { + return torch::linalg_eig(self); +} + +inline std::tuple eig_out( + Tensor& eigvals, + Tensor& eigvecs, + const Tensor& self) { + return torch::linalg_eig_out(eigvals, eigvecs, self); +} + +inline Tensor eigvals(const Tensor& self) { + return torch::linalg_eigvals(self); +} + +inline Tensor& eigvals_out(Tensor& result, const Tensor& self) { + return torch::linalg_eigvals_out(result, self); +} + +inline std::tuple eigh( + const 
Tensor& self, + c10::string_view uplo) { + return torch::linalg_eigh(self, uplo); +} + +inline std::tuple eigh_out( + Tensor& eigvals, + Tensor& eigvecs, + const Tensor& self, + c10::string_view uplo) { + return torch::linalg_eigh_out(eigvals, eigvecs, self, uplo); +} + +inline Tensor eigvalsh(const Tensor& self, c10::string_view uplo) { + return torch::linalg_eigvalsh(self, uplo); +} + +inline Tensor& eigvalsh_out( + Tensor& result, + const Tensor& self, + c10::string_view uplo) { + return torch::linalg_eigvalsh_out(result, self, uplo); +} + +inline Tensor householder_product(const Tensor& input, const Tensor& tau) { + return torch::linalg_householder_product(input, tau); +} + +inline Tensor& householder_product_out( + Tensor& result, + const Tensor& input, + const Tensor& tau) { + return torch::linalg_householder_product_out(result, input, tau); +} + +inline std::tuple lu_factor( + const Tensor& self, + const bool pivot) { + return torch::linalg_lu_factor(self, pivot); +} + +inline std::tuple lu_factor_out( + Tensor& LU, + Tensor& pivots, + const Tensor& self, + const bool pivot) { + return torch::linalg_lu_factor_out(LU, pivots, self, pivot); +} + +inline std::tuple lu( + const Tensor& self, + const bool pivot) { + return torch::linalg_lu(self, pivot); +} + +inline std::tuple lu_out( + Tensor& P, + Tensor& L, + Tensor& U, + const Tensor& self, + const bool pivot) { + return torch::linalg_lu_out(P, L, U, self, pivot); +} + +inline std::tuple lstsq( + const Tensor& self, + const Tensor& b, + c10::optional cond, + c10::optional driver) { + return torch::linalg_lstsq(self, b, cond, driver); +} + +inline Tensor matrix_exp(const Tensor& self) { + return torch::linalg_matrix_exp(self); +} + +inline Tensor norm( + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_norm(self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor norm( + const Tensor& self, + c10::string_view 
ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& norm_out( + Tensor& result, + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_norm_out( + result, self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& norm_out( + Tensor& result, + const Tensor& self, + c10::string_view ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_norm_out(result, self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor vector_norm( + const Tensor& self, + Scalar ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_vector_norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& vector_norm_out( + Tensor& result, + const Tensor& self, + Scalar ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_vector_norm_out( + result, self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor matrix_norm( + const Tensor& self, + const Scalar& ord, + IntArrayRef dim, + bool keepdim, + optional dtype) { + return torch::linalg_matrix_norm(self, ord, dim, keepdim, dtype); +} + +inline Tensor& matrix_norm_out( + const Tensor& self, + const Scalar& ord, + IntArrayRef dim, + bool keepdim, + optional dtype, + Tensor& result) { + return torch::linalg_matrix_norm_out(result, self, ord, dim, keepdim, dtype); +} + +inline Tensor matrix_norm( + const Tensor& self, + std::string ord, + IntArrayRef dim, + bool keepdim, + optional dtype) { + return torch::linalg_matrix_norm(self, ord, dim, keepdim, dtype); +} + +inline Tensor& matrix_norm_out( + const Tensor& self, + std::string ord, + IntArrayRef dim, + bool keepdim, + optional dtype, + Tensor& result) { + return torch::linalg_matrix_norm_out(result, self, ord, dim, keepdim, dtype); +} + +inline 
Tensor matrix_power(const Tensor& self, int64_t n) { + return torch::linalg_matrix_power(self, n); +} + +inline Tensor& matrix_power_out(const Tensor& self, int64_t n, Tensor& result) { + return torch::linalg_matrix_power_out(result, self, n); +} + +inline Tensor matrix_rank(const Tensor& input, double tol, bool hermitian) { + return torch::linalg_matrix_rank(input, tol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + const Tensor& tol, + bool hermitian) { + return torch::linalg_matrix_rank(input, tol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + c10::optional atol, + c10::optional rtol, + bool hermitian) { + return torch::linalg_matrix_rank(input, atol, rtol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + const c10::optional& atol, + const c10::optional& rtol, + bool hermitian) { + return torch::linalg_matrix_rank(input, atol, rtol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + double tol, + bool hermitian) { + return torch::linalg_matrix_rank_out(result, input, tol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + const Tensor& tol, + bool hermitian) { + return torch::linalg_matrix_rank_out(result, input, tol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + c10::optional atol, + c10::optional rtol, + bool hermitian) { + return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + const c10::optional& atol, + const c10::optional& rtol, + bool hermitian) { + return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian); +} + +inline Tensor multi_dot(TensorList tensors) { + return torch::linalg_multi_dot(tensors); +} + +inline Tensor& multi_dot_out(TensorList tensors, Tensor& result) { + return torch::linalg_multi_dot_out(result, tensors); +} + +inline 
Tensor pinv(const Tensor& input, double rcond, bool hermitian) { + return torch::linalg_pinv(input, rcond, hermitian); +} + +inline Tensor& pinv_out( + Tensor& result, + const Tensor& input, + double rcond, + bool hermitian) { + return torch::linalg_pinv_out(result, input, rcond, hermitian); +} + +inline std::tuple qr( + const Tensor& input, + c10::string_view mode) { + return torch::linalg_qr(input, mode); +} + +inline std::tuple qr_out( + Tensor& Q, + Tensor& R, + const Tensor& input, + c10::string_view mode) { + return torch::linalg_qr_out(Q, R, input, mode); +} + +inline std::tuple solve_ex( + const Tensor& input, + const Tensor& other, + bool left, + bool check_errors) { + return torch::linalg_solve_ex(input, other, left, check_errors); +} + +inline std::tuple solve_ex_out( + Tensor& result, + Tensor& info, + const Tensor& input, + const Tensor& other, + bool left, + bool check_errors) { + return torch::linalg_solve_ex_out( + result, info, input, other, left, check_errors); +} + +inline Tensor solve(const Tensor& input, const Tensor& other, bool left) { + return torch::linalg_solve(input, other, left); +} + +inline Tensor& solve_out( + Tensor& result, + const Tensor& input, + const Tensor& other, + bool left) { + return torch::linalg_solve_out(result, input, other, left); +} + +inline Tensor solve_triangular( + const Tensor& input, + const Tensor& other, + bool upper, + bool left, + bool unitriangular) { + return torch::linalg_solve_triangular( + input, other, upper, left, unitriangular); +} + +inline Tensor& solve_triangular_out( + Tensor& result, + const Tensor& input, + const Tensor& other, + bool upper, + bool left, + bool unitriangular) { + return torch::linalg_solve_triangular_out( + result, input, other, upper, left, unitriangular); +} + +inline std::tuple svd( + const Tensor& input, + bool full_matrices, + c10::optional driver) { + return torch::linalg_svd(input, full_matrices, driver); +} + +inline std::tuple svd_out( + Tensor& U, + Tensor& S, + 
Tensor& Vh, + const Tensor& input, + bool full_matrices, + c10::optional driver) { + return torch::linalg_svd_out(U, S, Vh, input, full_matrices, driver); +} + +inline Tensor svdvals( + const Tensor& input, + c10::optional driver) { + return torch::linalg_svdvals(input, driver); +} + +inline Tensor& svdvals_out( + Tensor& result, + const Tensor& input, + c10::optional driver) { + return torch::linalg_svdvals_out(result, input, driver); +} + +inline Tensor tensorinv(const Tensor& self, int64_t ind) { + return torch::linalg_tensorinv(self, ind); +} + +inline Tensor& tensorinv_out(Tensor& result, const Tensor& self, int64_t ind) { + return torch::linalg_tensorinv_out(result, self, ind); +} + +inline Tensor tensorsolve( + const Tensor& self, + const Tensor& other, + OptionalIntArrayRef dims) { + return torch::linalg_tensorsolve(self, other, dims); +} + +inline Tensor& tensorsolve_out( + Tensor& result, + const Tensor& self, + const Tensor& other, + OptionalIntArrayRef dims) { + return torch::linalg_tensorsolve_out(result, self, other, dims); +} + +inline Tensor inv(const Tensor& input) { + return torch::linalg_inv(input); +} + +inline Tensor& inv_out(Tensor& result, const Tensor& input) { + return torch::linalg_inv_out(result, input); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// Cholesky decomposition +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.cholesky +/// +/// Example: +/// ``` +/// auto A = torch::randn({4, 4}); +/// auto A = torch::matmul(A, A.t()); +/// auto L = torch::linalg::cholesky(A); +/// assert(torch::allclose(torch::matmul(L, L.t()), A)); +/// ``` +inline Tensor cholesky(const Tensor& self) { + return detail::cholesky(self); +} + +inline Tensor cholesky_out(Tensor& result, const Tensor& self) { + return detail::cholesky_out(result, self); +} + +// C10_DEPRECATED_MESSAGE("linalg_det is deprecated, use det instead.") +inline Tensor linalg_det(const Tensor& self) { + return detail::det(self); +} + +/// 
See the documentation of torch.linalg.det +inline Tensor det(const Tensor& self) { + return detail::det(self); +} + +/// Computes the sign and (natural) logarithm of the determinant +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.slogdet +inline std::tuple slogdet(const Tensor& input) { + return detail::slogdet(input); +} + +inline std::tuple slogdet_out( + Tensor& sign, + Tensor& logabsdet, + const Tensor& input) { + return detail::slogdet_out(sign, logabsdet, input); +} + +/// Computes eigenvalues and eigenvectors of non-symmetric/non-hermitian +/// matrices +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eig +inline std::tuple eig(const Tensor& self) { + return detail::eig(self); +} + +inline std::tuple eig_out( + Tensor& eigvals, + Tensor& eigvecs, + const Tensor& self) { + return detail::eig_out(eigvals, eigvecs, self); +} + +/// Computes eigenvalues of non-symmetric/non-hermitian matrices +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvals +inline Tensor eigvals(const Tensor& self) { + return detail::eigvals(self); +} + +inline Tensor& eigvals_out(Tensor& result, const Tensor& self) { + return detail::eigvals_out(result, self); +} + +/// Computes eigenvalues and eigenvectors +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigh +inline std::tuple eigh( + const Tensor& self, + c10::string_view uplo) { + return detail::eigh(self, uplo); +} + +inline std::tuple eigh_out( + Tensor& eigvals, + Tensor& eigvecs, + const Tensor& self, + c10::string_view uplo) { + return detail::eigh_out(eigvals, eigvecs, self, uplo); +} + +/// Computes eigenvalues +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvalsh +inline Tensor eigvalsh(const Tensor& self, c10::string_view uplo) { + return detail::eigvalsh(self, uplo); +} + +inline Tensor& eigvalsh_out( + Tensor& result, + const Tensor& self, + c10::string_view uplo) { + return detail::eigvalsh_out(result, 
self, uplo); +} + +/// Computes the product of Householder matrices +/// +/// See +/// https://pytorch.org/docs/master/linalg.html#torch.linalg.householder_product +inline Tensor householder_product(const Tensor& input, const Tensor& tau) { + return detail::householder_product(input, tau); +} + +inline Tensor& householder_product_out( + Tensor& result, + const Tensor& input, + const Tensor& tau) { + return detail::householder_product_out(result, input, tau); +} + +inline std::tuple lstsq( + const Tensor& self, + const Tensor& b, + c10::optional cond, + c10::optional driver) { + return detail::lstsq(self, b, cond, driver); +} + +/// Computes the matrix exponential +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_exp +inline Tensor matrix_exp(const Tensor& input) { + return detail::matrix_exp(input); +} + +// C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.") +inline Tensor linalg_norm( + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm(self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +// C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.") +inline Tensor linalg_norm( + const Tensor& self, + c10::string_view ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +// C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out +// instead.") +inline Tensor& linalg_norm_out( + Tensor& result, + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm_out(result, self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +// C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out +// instead.") +inline Tensor& linalg_norm_out( + Tensor& result, + const Tensor& self, + c10::string_view ord, + OptionalIntArrayRef opt_dim, + bool keepdim, 
+ optional opt_dtype) { + return detail::norm_out(result, self, ord, opt_dim, keepdim, opt_dtype); +} + +/// Computes the LU factorization with partial pivoting +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu_factor +inline std::tuple lu_factor( + const Tensor& input, + const bool pivot = true) { + return detail::lu_factor(input, pivot); +} + +inline std::tuple lu_factor_out( + Tensor& LU, + Tensor& pivots, + const Tensor& self, + const bool pivot = true) { + return detail::lu_factor_out(LU, pivots, self, pivot); +} + +/// Computes the LU factorization with partial pivoting +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu +inline std::tuple lu( + const Tensor& input, + const bool pivot = true) { + return detail::lu(input, pivot); +} + +inline std::tuple lu_out( + Tensor& P, + Tensor& L, + Tensor& U, + const Tensor& self, + const bool pivot = true) { + return detail::lu_out(P, L, U, self, pivot); +} + +inline Tensor norm( + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm(self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor norm( + const Tensor& self, + std::string ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& norm_out( + Tensor& result, + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm_out(result, self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& norm_out( + Tensor& result, + const Tensor& self, + std::string ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm_out(result, self, ord, opt_dim, keepdim, opt_dtype); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.vector_norm +inline Tensor vector_norm( + const Tensor& self, + Scalar 
ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::vector_norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& vector_norm_out( + Tensor& result, + const Tensor& self, + Scalar ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::vector_norm_out( + result, self, ord, opt_dim, keepdim, opt_dtype); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_norm +inline Tensor matrix_norm( + const Tensor& self, + const Scalar& ord, + IntArrayRef dim, + bool keepdim, + optional dtype) { + return detail::matrix_norm(self, ord, dim, keepdim, dtype); +} + +inline Tensor& matrix_norm_out( + const Tensor& self, + const Scalar& ord, + IntArrayRef dim, + bool keepdim, + optional dtype, + Tensor& result) { + return detail::matrix_norm_out(self, ord, dim, keepdim, dtype, result); +} + +inline Tensor matrix_norm( + const Tensor& self, + std::string ord, + IntArrayRef dim, + bool keepdim, + optional dtype) { + return detail::matrix_norm(self, ord, dim, keepdim, dtype); +} + +inline Tensor& matrix_norm_out( + const Tensor& self, + std::string ord, + IntArrayRef dim, + bool keepdim, + optional dtype, + Tensor& result) { + return detail::matrix_norm_out(self, ord, dim, keepdim, dtype, result); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_power +inline Tensor matrix_power(const Tensor& self, int64_t n) { + return detail::matrix_power(self, n); +} + +inline Tensor& matrix_power_out(const Tensor& self, int64_t n, Tensor& result) { + return detail::matrix_power_out(self, n, result); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_rank +inline Tensor matrix_rank(const Tensor& input, double tol, bool hermitian) { + return detail::matrix_rank(input, tol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + const Tensor& tol, + bool hermitian) { + return detail::matrix_rank(input, tol, hermitian); 
+} + +inline Tensor matrix_rank( + const Tensor& input, + c10::optional atol, + c10::optional rtol, + bool hermitian) { + return detail::matrix_rank(input, atol, rtol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + const c10::optional& atol, + const c10::optional& rtol, + bool hermitian) { + return detail::matrix_rank(input, atol, rtol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + double tol, + bool hermitian) { + return detail::matrix_rank_out(result, input, tol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + const Tensor& tol, + bool hermitian) { + return detail::matrix_rank_out(result, input, tol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + c10::optional atol, + c10::optional rtol, + bool hermitian) { + return detail::matrix_rank_out(result, input, atol, rtol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + const c10::optional& atol, + const c10::optional& rtol, + bool hermitian) { + return detail::matrix_rank_out(result, input, atol, rtol, hermitian); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.multi_dot +inline Tensor multi_dot(TensorList tensors) { + return detail::multi_dot(tensors); +} + +inline Tensor& multi_dot_out(TensorList tensors, Tensor& result) { + return detail::multi_dot_out(tensors, result); +} + +/// Computes the pseudo-inverse +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.pinv +inline Tensor pinv( + const Tensor& input, + double rcond = 1e-15, + bool hermitian = false) { + return detail::pinv(input, rcond, hermitian); +} + +inline Tensor& pinv_out( + Tensor& result, + const Tensor& input, + double rcond = 1e-15, + bool hermitian = false) { + return detail::pinv_out(result, input, rcond, hermitian); +} + +/// Computes the QR decomposition +/// +/// See 
https://pytorch.org/docs/master/linalg.html#torch.linalg.qr +inline std::tuple qr( + const Tensor& input, + c10::string_view mode = "reduced") { + // C++17 Change the initialisation to "reduced"sv + // Same for qr_out + return detail::qr(input, mode); +} + +inline std::tuple qr_out( + Tensor& Q, + Tensor& R, + const Tensor& input, + c10::string_view mode = "reduced") { + return detail::qr_out(Q, R, input, mode); +} + +/// Computes the LDL decomposition +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_factor_ex +inline std::tuple ldl_factor_ex( + const Tensor& input, + bool hermitian, + bool check_errors) { + return torch::linalg_ldl_factor_ex(input, hermitian, check_errors); +} + +inline std::tuple ldl_factor_ex_out( + Tensor& LD, + Tensor& pivots, + Tensor& info, + const Tensor& input, + bool hermitian, + bool check_errors) { + return torch::linalg_ldl_factor_ex_out( + LD, pivots, info, input, hermitian, check_errors); +} + +/// Solve a system of linear equations using the LDL decomposition +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_solve +inline Tensor ldl_solve( + const Tensor& LD, + const Tensor& pivots, + const Tensor& B, + bool hermitian) { + return torch::linalg_ldl_solve(LD, pivots, B, hermitian); +} + +inline Tensor& ldl_solve_out( + Tensor& result, + const Tensor& LD, + const Tensor& pivots, + const Tensor& B, + bool hermitian) { + return torch::linalg_ldl_solve_out(result, LD, pivots, B, hermitian); +} + +/// Solves a system linear system AX = B +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_ex +inline std::tuple solve_ex( + const Tensor& input, + const Tensor& other, + bool left, + bool check_errors) { + return detail::solve_ex(input, other, left, check_errors); +} + +inline std::tuple solve_ex_out( + Tensor& result, + Tensor& info, + const Tensor& input, + const Tensor& other, + bool left, + bool check_errors) { + return detail::solve_ex_out(result, info, input, 
other, left, check_errors); +} + +/// Computes a tensor `x` such that `matmul(input, x) = other`. +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.solve +inline Tensor solve(const Tensor& input, const Tensor& other, bool left) { + return detail::solve(input, other, left); +} + +inline Tensor& solve_out( + Tensor& result, + const Tensor& input, + const Tensor& other, + bool left) { + return detail::solve_out(result, input, other, left); +} + +/// Computes a solution of a linear system AX = B for input = A and other = B +/// whenever A is square upper or lower triangular and does not have zeros in +/// the diagonal +/// +/// See +/// https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_triangular +inline Tensor solve_triangular( + const Tensor& input, + const Tensor& other, + bool upper, + bool left, + bool unitriangular) { + return detail::solve_triangular(input, other, upper, left, unitriangular); +} + +inline Tensor& solve_triangular_out( + Tensor& result, + const Tensor& input, + const Tensor& other, + bool upper, + bool left, + bool unitriangular) { + return detail::solve_triangular_out( + result, input, other, upper, left, unitriangular); +} + +/// Computes the singular values and singular vectors +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.svd +inline std::tuple svd( + const Tensor& input, + bool full_matrices, + c10::optional driver) { + return detail::svd(input, full_matrices, driver); +} + +inline std::tuple svd_out( + Tensor& U, + Tensor& S, + Tensor& Vh, + const Tensor& input, + bool full_matrices, + c10::optional driver) { + return detail::svd_out(U, S, Vh, input, full_matrices, driver); +} + +/// Computes the singular values +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.svdvals +inline Tensor svdvals( + const Tensor& input, + c10::optional driver) { + return detail::svdvals(input, driver); +} + +inline Tensor& svdvals_out( + Tensor& result, + const Tensor& input, + 
c10::optional driver) { + return detail::svdvals_out(result, input, driver); +} + +/// Computes the inverse of a tensor +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorinv +/// +/// Example: +/// ``` +/// auto a = torch::eye(4*6).reshape({4, 6, 8, 3}); +/// int64_t ind = 2; +/// auto ainv = torch::linalg::tensorinv(a, ind); +/// ``` +inline Tensor tensorinv(const Tensor& self, int64_t ind) { + return detail::tensorinv(self, ind); +} + +inline Tensor& tensorinv_out(Tensor& result, const Tensor& self, int64_t ind) { + return detail::tensorinv_out(result, self, ind); +} + +/// Computes a tensor `x` such that `tensordot(input, x, dims=x.dim()) = other`. +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorsolve +/// +/// Example: +/// ``` +/// auto a = torch::eye(2*3*4).reshape({2*3, 4, 2, 3, 4}); +/// auto b = torch::randn(2*3, 4); +/// auto x = torch::linalg::tensorsolve(a, b); +/// ``` +inline Tensor tensorsolve( + const Tensor& input, + const Tensor& other, + OptionalIntArrayRef dims) { + return detail::tensorsolve(input, other, dims); +} + +inline Tensor& tensorsolve_out( + Tensor& result, + const Tensor& input, + const Tensor& other, + OptionalIntArrayRef dims) { + return detail::tensorsolve_out(result, input, other, dims); +} + +/// Computes a tensor `inverse_input` such that `dot(input, inverse_input) = +/// eye(input.size(0))`. 
+/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.inv +inline Tensor inv(const Tensor& input) { + return detail::inv(input); +} + +inline Tensor& inv_out(Tensor& result, const Tensor& input) { + return detail::inv_out(result, input); +} + +} // namespace linalg +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h new file mode 100644 index 0000000000000000000000000000000000000000..1b2eabd6832ba8a3c1d07add2e0313ae407c51da --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include +#include + +#ifdef __OBJC__ +#include +#include +using MTLCommandBuffer_t = id; +using DispatchQueue_t = dispatch_queue_t; +#else +using MTLCommandBuffer_t = void*; +using DispatchQueue_t = void*; +#endif + +namespace torch { +namespace mps { + +/// Returns true if MPS device is available. +bool TORCH_API is_available(); + +/// Sets the RNG seed for the MPS device. +void TORCH_API manual_seed(uint64_t seed); + +/// Waits for all streams on the MPS device to complete. +/// This blocks the calling CPU thread by using the 'waitUntilCompleted()' +/// method to wait for Metal command buffers finish executing all the +/// encoded GPU operations before returning. +void TORCH_API synchronize(); + +/// Submits the currently active command buffer to run on the MPS device. +void TORCH_API commit(); + +/// Get the current command buffer to encode the Metal commands. +MTLCommandBuffer_t TORCH_API get_command_buffer(); + +/// Get the dispatch_queue_t to synchronize encoding the custom kernels +/// with the PyTorch MPS backend. 
+DispatchQueue_t TORCH_API get_dispatch_queue(); + +} // namespace mps +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nested.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nested.h new file mode 100644 index 0000000000000000000000000000000000000000..d91c878348bd52d9e2dd8695e63c96d1d1dc216c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nested.h @@ -0,0 +1,95 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nested { + +/// Nested tensor +/// +/// See +/// https://pytorch.org/docs/master/nested.html#torch.nested.nested_tensor +/// +/// ``` +// implemented on python object to allow torch.nested.nested_tensor to be +// constructed with arbitrarily nested python objects - for now, only arbitrary +// python lists and lists of Tensors +// See torch/csrc/autograd/python_nested_functions_manual.cpp for Python +// implementation +// See here for C++ implementation +inline at::Tensor nested_tensor( + at::TensorList nested_tensor_data, + const at::TensorOptions& options = {}) { + auto out = at::_nested_tensor_from_tensor_list( + nested_tensor_data, + c10::typeMetaToScalarType(options.dtype()), + c10::nullopt, + options.device(), + options.pinned_memory()); + if (options.has_requires_grad() && options.requires_grad()) { + out.requires_grad_(true); + } + return out; +} + +inline at::Tensor nested_tensor( + at::ArrayRef nested_tensor_data, + const at::TensorOptions& options = {}) { + for (const auto& tdc : nested_tensor_data) { + TORCH_CHECK( + tdc.is_init_list(), + "nested_tensor() not implemented for these parameters"); + } + // Construct a TensorList using nested_tensor_data + std::vector tensor_list(nested_tensor_data.size()); + std::transform( + nested_tensor_data.begin(), + nested_tensor_data.end(), + tensor_list.begin(), + [&](const detail::TensorDataContainer& 
tdc) { + return tdc.convert_to_tensor(options); + }); + auto out = at::_nested_tensor_from_tensor_list( + tensor_list, + c10::typeMetaToScalarType(options.dtype()), + c10::nullopt, + options.device(), + options.pinned_memory()); + if (options.has_requires_grad() && options.requires_grad()) { + out.requires_grad_(true); + } + return out; +} + +/// As Nested Tensor +/// +/// See +/// https://pytorch.org/docs/master/nested.html#torch.nested.as_nested_tensor +/// +/// ``` +inline at::Tensor as_nested_tensor( + at::TensorList list, + c10::optional dtype = c10::nullopt, + c10::optional device = c10::nullopt) { + return at::_nested_tensor_from_tensor_list( + list, dtype, c10::nullopt, device, c10::nullopt); +} + +/// Nested to padded tensor +/// +/// See +/// https://pytorch.org/docs/master/nested.html#torch.nested.to_padded_tensor +/// +/// ``` +inline at::Tensor to_padded_tensor( + const at::Tensor& self, + double padding, + at::OptionalIntArrayRef output_size = c10::nullopt) { + return at::nested_to_padded_tensor(self, padding, output_size); +} + +} // namespace nested +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn.h new file mode 100644 index 0000000000000000000000000000000000000000..b93220b5d62a0ccf64b16a3b8aae8cb940045849 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn.h @@ -0,0 +1,10 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h new file mode 100644 index 0000000000000000000000000000000000000000..aaf30d90974b11bd97dfa7617bc78faf13ded068 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/cloneable.h @@ -0,0 +1,98 @@ +#pragma once + +#include +#include +#include + +#include +#include + +#include +#include + +namespace torch { +namespace nn { +/// The `clone()` method in the base `Module` class does not have knowledge of +/// the concrete runtime type of its subclasses. Therefore, `clone()` must +/// either be called from within the subclass, or from a base class that has +/// knowledge of the concrete type. `Cloneable` uses the CRTP to gain +/// knowledge of the subclass' static type and provide an implementation of the +/// `clone()` method. We do not want to use this pattern in the base class, +/// because then storing a module would always require templatizing it. +template +// NOLINTNEXTLINE(bugprone-exception-escape) +class Cloneable : public Module { + public: + using Module::Module; + + /// `reset()` must perform initialization of all members with reference + /// semantics, most importantly parameters, buffers and submodules. + virtual void reset() = 0; + + /// Performs a recursive "deep copy" of the `Module`, such that all parameters + /// and submodules in the cloned module are different from those in the + /// original module. + std::shared_ptr clone( + const optional& device = nullopt) const override { + NoGradGuard no_grad; + + const auto& self = static_cast(*this); + auto copy = std::make_shared(self); + copy->parameters_.clear(); + copy->buffers_.clear(); + copy->children_.clear(); + copy->reset(); + TORCH_CHECK( + copy->parameters_.size() == parameters_.size(), + "The cloned module does not have the same number of " + "parameters as the original module after calling reset(). " + "Are you sure you called register_parameter() inside reset() " + "and not the constructor?"); + for (const auto& parameter : named_parameters(/*recurse=*/false)) { + auto& tensor = *parameter; + auto data = device && tensor.device() != *device + ? 
tensor.to(*device) + : autograd::Variable(tensor).clone(); + copy->parameters_[parameter.key()].set_data(data); + } + TORCH_CHECK( + copy->buffers_.size() == buffers_.size(), + "The cloned module does not have the same number of " + "buffers as the original module after calling reset(). " + "Are you sure you called register_buffer() inside reset() " + "and not the constructor?"); + for (const auto& buffer : named_buffers(/*recurse=*/false)) { + auto& tensor = *buffer; + auto data = device && tensor.device() != *device + ? tensor.to(*device) + : autograd::Variable(tensor).clone(); + copy->buffers_[buffer.key()].set_data(data); + } + TORCH_CHECK( + copy->children_.size() == children_.size(), + "The cloned module does not have the same number of " + "child modules as the original module after calling reset(). " + "Are you sure you called register_module() inside reset() " + "and not the constructor?"); + for (const auto& child : children_) { + copy->children_[child.key()]->clone_(*child.value(), device); + } + return copy; + } + + private: + void clone_(Module& other, const optional& device) final { + // Here we are *pretty* certain that `other's` type is `Derived` (because it + // was registered under the same name as `this`), but you never know what + // crazy things `reset()` does, so `dynamic_cast` just to be safe. 
+ auto clone = std::dynamic_pointer_cast(other.clone(device)); + TORCH_CHECK( + clone != nullptr, + "Attempted to clone submodule, but it is of a " + "different type than the submodule it was to be cloned into"); + static_cast(*this) = *clone; + } +}; + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..b148edc68173f4d11cf58e042902edf3c508afff --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h new file mode 100644 index 0000000000000000000000000000000000000000..52030da2aa9233fc8dbbe0b4da36afd3e939a006 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/activation.h @@ -0,0 +1,966 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor elu(Tensor input, double alpha, bool inplace) { + if (inplace) { + return torch::elu_(input, alpha); + } else { + return torch::elu(input, alpha); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.elu +/// about the exact behavior of 
this functional. +/// +/// See the documentation for `torch::nn::functional::ELUFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true)); +/// ``` +inline Tensor elu(Tensor input, const ELUFuncOptions& options = {}) { + return detail::elu(std::move(input), options.alpha(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor selu(Tensor input, bool inplace) { + if (inplace) { + return torch::selu_(input); + } else { + return torch::selu(input); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.selu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SELUFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::selu(input, F::SELUFuncOptions(false)); +/// ``` +inline Tensor selu(Tensor input, const SELUFuncOptions& options = {}) { + return detail::selu(std::move(input), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor hardshrink(const Tensor& input, double lambda) { + return torch::hardshrink(input, lambda); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardshrink +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::HardshrinkFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42)); +/// ``` +inline Tensor hardshrink( + const Tensor& input, + const HardshrinkFuncOptions& options = {}) { + return detail::hardshrink(input, options.lambda()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor hardtanh( + Tensor input, + double min_val, + double max_val, + bool inplace) { + if (inplace) { + return torch::hardtanh_(input, min_val, max_val); + } else { + return torch::hardtanh(input, min_val, max_val); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardtanh +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::HardtanhFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hardtanh(x, +/// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true)); +/// ``` +inline Tensor hardtanh(Tensor input, const HardtanhFuncOptions& options = {}) { + return detail::hardtanh( + std::move(input), + options.min_val(), + options.max_val(), + options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor leaky_relu(Tensor input, double negative_slope, bool inplace) { + if (inplace) { + return torch::leaky_relu_(input, negative_slope); + } else { + return torch::leaky_relu(input, negative_slope); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.leaky_relu +/// about the exact behavior of this functional. 
+/// +/// See the documentation for `torch::nn::functional::LeakyReLUFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::leaky_relu(x, +/// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true)); +/// ``` +inline Tensor leaky_relu( + Tensor input, + const LeakyReLUFuncOptions& options = {}) { + return detail::leaky_relu( + std::move(input), options.negative_slope(), options.inplace()); +} + +// ============================================================================ + +inline Tensor logsigmoid(const Tensor& input) { + return torch::log_sigmoid(input); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor gumbel_softmax( + const Tensor& logits, + double tau, + bool hard, + int dim) { + auto gumbels = + -torch::empty_like(logits).exponential_().log(); // ~Gumbel(0,1) + gumbels = (logits + gumbels) / tau; // ~Gumbel(logits, tau) + auto y_soft = gumbels.softmax(dim); + + torch::Tensor ret; + if (hard) { + // Straight through. + auto index = std::get<1>(y_soft.max(dim, /*keepdim=*/true)); + auto y_hard = torch::zeros_like(logits).scatter_(dim, index, 1.0); + ret = y_hard - y_soft.detach() + y_soft; + } else { + ret = y_soft; + } + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.gumbel_softmax +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::GumbelSoftmaxFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1)); +/// ``` +inline Tensor gumbel_softmax( + const Tensor& logits, + const GumbelSoftmaxFuncOptions& options = {}) { + return detail::gumbel_softmax( + logits, options.tau(), options.hard(), options.dim()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor softmax( + const Tensor& input, + int64_t dim, + c10::optional dtype) { + Tensor ret; + + if (dtype == c10::nullopt) { + ret = input.softmax(dim); + } else { + ret = input.softmax(dim, dtype); + } + + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmax +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SoftmaxFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softmax(input, F::SoftmaxFuncOptions(1)); +/// ``` +inline Tensor softmax(const Tensor& input, const SoftmaxFuncOptions& options) { + return detail::softmax(input, options.dim(), options.dtype()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor softmin( + const Tensor& input, + int64_t dim, + c10::optional dtype) { + Tensor ret; + + if (dtype == c10::nullopt) { + ret = (-input).softmax(dim); + } else { + ret = (-input).softmax(dim, dtype); + } + + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmin +/// about the exact behavior of this functional. 
+/// +/// See the documentation for `torch::nn::functional::SoftminFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softmin(input, F::SoftminFuncOptions(1)); +/// ``` +inline Tensor softmin(const Tensor& input, const SoftminFuncOptions& options) { + return detail::softmin(input, options.dim(), options.dtype()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor log_softmax( + const Tensor& input, + int64_t dim, + c10::optional dtype) { + Tensor ret; + + if (dtype == c10::nullopt) { + ret = input.log_softmax(dim); + } else { + ret = input.log_softmax(dim, dtype); + } + + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.log_softmax +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LogSoftmaxFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::log_softmax(input, LogSoftmaxFuncOptions(1)); +/// ``` +inline Tensor log_softmax( + const Tensor& input, + const LogSoftmaxFuncOptions& options) { + return detail::log_softmax(input, options.dim(), options.dtype()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor glu(const Tensor& input, int64_t dim) { + TORCH_CHECK( + input.dim() != 0, + "glu does not suppport scalars because halving size must be even"); + return torch::glu(input, dim); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.glu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::GLUFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::glu(input, GLUFuncOptions(1)); +/// ``` +inline Tensor glu(const Tensor& input, const GLUFuncOptions& options = {}) { + return detail::glu(input, options.dim()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor gelu(const Tensor& input, string approximate) { + return torch::gelu(input, approximate); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +inline Tensor gelu(const Tensor& input, const GELUFuncOptions& options = {}) { + return detail::gelu(input, options.approximate()); +} + +// ============================================================================ + +inline Tensor silu(const Tensor& input) { + return torch::silu(input); +} + +// ============================================================================ + +inline Tensor mish(const Tensor& input) { + return torch::mish(input); 
+} + +// ============================================================================ + +inline Tensor prelu(const Tensor& input, const Tensor& weight) { + return torch::prelu(input, weight); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor relu(Tensor input, bool inplace) { + if (inplace) { + return torch::relu_(input); + } else { + return torch::relu(input); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::ReLUFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::relu(x, F::ReLUFuncOptions().inplace(true)); +/// ``` +inline Tensor relu(Tensor input, const ReLUFuncOptions& options = {}) { + return detail::relu(std::move(input), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor relu6(Tensor input, bool inplace) { + if (inplace) { + return torch::relu6_(input); + } else { + return torch::relu6(input); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu6 +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::ReLU6FuncOptions` class to +/// learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::relu6(x, F::ReLU6FuncOptions().inplace(true)); +/// ``` +inline Tensor relu6(Tensor input, const ReLU6FuncOptions& options = {}) { + return detail::relu6(std::move(input), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor rrelu( + Tensor input, + double lower, + double upper, + bool training, + bool inplace) { + if (inplace) { + return torch::rrelu_(input, lower, upper, training); + } else { + return torch::rrelu(input, lower, upper, training); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.rrelu +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::RReLUFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true)); +/// ``` +inline Tensor rrelu(Tensor input, const RReLUFuncOptions& options = {}) { + return detail::rrelu( + std::move(input), + options.lower(), + options.upper(), + options.training(), + options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor celu(Tensor input, double alpha, bool inplace) { + if (inplace) { + return torch::celu_(input, alpha); + } else { + return torch::celu(input, alpha); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.celu +/// about the exact behavior of this functional. 
+/// +/// See the documentation for `torch::nn::functional::CELUFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true)); +/// ``` +inline Tensor celu(Tensor input, const CELUFuncOptions& options = {}) { + return detail::celu(std::move(input), options.alpha(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor softplus(const Tensor& input, double beta, double threshold) { + return torch::softplus(input, beta, threshold); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softplus +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SoftplusFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0)); +/// ``` +inline Tensor softplus( + const Tensor& input, + const SoftplusFuncOptions& options = {}) { + return detail::softplus(input, options.beta(), options.threshold()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor softshrink(const Tensor& input, double lambda) { + return torch::softshrink(input, lambda); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softshrink +/// about the exact behavior of this functional. 
+/// +/// See the documentation for `torch::nn::functional::SoftshrinkFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softshrink(x, F::SoftshrinkFuncOptions(0.42)); +/// ``` +inline Tensor softshrink( + const Tensor& input, + const SoftshrinkFuncOptions& options = {}) { + return detail::softshrink(input, options.lambda()); +} + +// ============================================================================ + +inline Tensor softsign(const Tensor& input) { + return input / (input.abs() + 1); +} + +// ============================================================================ + +inline Tensor tanhshrink(const Tensor& input) { + return input - input.tanh(); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor threshold( + Tensor input, + double threshold, + double value, + bool inplace) { + if (inplace) { + return torch::threshold_(input, threshold, value); + } else { + return torch::threshold(input, threshold, value); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.threshold +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::ThresholdFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true)); +/// ``` +inline Tensor threshold(Tensor input, const ThresholdFuncOptions& options) { + return detail::threshold( + std::move(input), + options.threshold(), + options.value(), + options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple multi_head_attention_forward( + const Tensor& query, + const Tensor& key, + const Tensor& value, + int64_t embed_dim_to_check, + int64_t num_heads, + const Tensor& in_proj_weight, + const Tensor& in_proj_bias, + const Tensor& bias_k, + const Tensor& bias_v, + bool add_zero_attn, + double dropout_p, + const Tensor& out_proj_weight, + const Tensor& out_proj_bias, + bool training = true, + const Tensor& key_padding_mask = {}, + bool need_weights = true, + const Tensor& attn_mask = {}, + bool use_separate_proj_weight = false, + const Tensor& q_proj_weight = {}, + const Tensor& k_proj_weight = {}, + const Tensor& v_proj_weight = {}, + const Tensor& static_k = {}, + const Tensor& static_v = {}, + bool average_attn_weights = true) { + namespace F = torch::nn::functional; + + const auto query_sizes = query.sizes(); + const auto& tgt_len = query_sizes[0]; + const auto& bsz = query_sizes[1]; + const auto& embed_dim = query_sizes[2]; + TORCH_INTERNAL_ASSERT(embed_dim == embed_dim_to_check); + TORCH_INTERNAL_ASSERT(key.sizes() == value.sizes()); + + const auto head_dim = embed_dim / num_heads; + TORCH_CHECK( + head_dim * num_heads == embed_dim, + "embed_dim must be divisible by num_heads"); + const auto scaling = 1 / std::sqrt(head_dim); + + Tensor q, k, v; + if (!use_separate_proj_weight) { + if (torch::equal(query, key) && torch::equal(key, value)) { + // self-attention + const auto chunks = + F::linear(query, in_proj_weight, in_proj_bias).chunk(3, /*dim=*/-1); + q = chunks[0]; + k = 
chunks[1]; + v = chunks[2]; + } else if (torch::equal(key, value)) { + // encoder-decoder attention + // This is inline in_proj function with in_proj_weight and in_proj_bias + auto _b = in_proj_bias; + auto _start = 0; + auto _end = embed_dim; + auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end); + if (_b.defined()) { + _b = _b.slice(/*dim=*/0, _start, _end); + } + q = F::linear(query, _w, _b); + + if (!key.defined()) { + TORCH_INTERNAL_ASSERT(!value.defined()); + k.reset(); + v.reset(); + } else { + // This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias; + _start = embed_dim; + _w = in_proj_weight.slice(/*dim=*/0, _start); + if (_b.defined()) { + _b = _b.slice(/*dim=*/0, _start); + } + const auto chunks = F::linear(key, _w, _b).chunk(2, /*dim=*/-1); + k = chunks[0]; + v = chunks[1]; + } + } else { + // This is inline in_proj function with in_proj_weight and in_proj_bias + auto _b = in_proj_bias; + auto _start = 0; + auto _end = embed_dim; + auto _w = in_proj_weight.slice(/*dim=*/0, _start, _end); + if (_b.defined()) { + _b = _b.slice(/*dim=*/0, _start, _end); + } + q = F::linear(query, _w, _b); + + // This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias; + _start = embed_dim; + _end = embed_dim * 2; + _w = in_proj_weight.slice(/*dim=*/0, _start, _end); + if (_b.defined()) { + _b = _b.slice(/*dim=*/0, _start, _end); + } + k = F::linear(key, _w, _b); + + // This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias; + _start = embed_dim * 2; + _w = in_proj_weight.slice(/*dim=*/0, _start); + if (_b.defined()) { + _b = _b.slice(0, _start); + } + v = F::linear(value, _w, _b); + } + } else { + const auto& q_proj_weight_non_opt = q_proj_weight; + { + const auto sizes = q_proj_weight_non_opt.sizes(); + const auto len1 = sizes[0]; + const auto len2 = sizes[1]; + TORCH_CHECK(len1 == embed_dim && len2 == query.size(-1)); + } + + const auto& k_proj_weight_non_opt = 
k_proj_weight; + { + const auto sizes = k_proj_weight_non_opt.sizes(); + const auto len1 = sizes[0]; + const auto len2 = sizes[1]; + TORCH_CHECK(len1 == embed_dim && len2 == key.size(-1)); + } + + const auto& v_proj_weight_non_opt = v_proj_weight; + { + const auto sizes = v_proj_weight_non_opt.sizes(); + const auto len1 = sizes[0]; + const auto len2 = sizes[1]; + TORCH_CHECK(len1 == embed_dim && len2 == value.size(-1)); + } + + if (in_proj_bias.defined()) { + q = F::linear( + query, + q_proj_weight_non_opt, + in_proj_bias.slice(/*dim=*/0, 0, embed_dim)); + k = F::linear( + key, + k_proj_weight_non_opt, + in_proj_bias.slice(/*dim=*/0, embed_dim, (embed_dim * 2))); + v = F::linear( + value, + v_proj_weight_non_opt, + in_proj_bias.slice(/*dim=*/0, (embed_dim * 2))); + } else { + q = F::linear(query, q_proj_weight_non_opt, in_proj_bias); + k = F::linear(key, k_proj_weight_non_opt, in_proj_bias); + v = F::linear(value, v_proj_weight_non_opt, in_proj_bias); + } + } + q = q * scaling; + Tensor attn_mask_ = attn_mask; + Tensor key_padding_mask_ = key_padding_mask; + if (bias_k.defined() && bias_v.defined()) { + if (!static_k.defined() && !static_v.defined()) { + k = torch::cat({k, bias_k.repeat({1, bsz, 1})}); + v = torch::cat({v, bias_v.repeat({1, bsz, 1})}); + if (attn_mask_.defined()) { + attn_mask_ = torch::cat( + {attn_mask_, + torch::zeros( + {attn_mask_.size(0), 1}, + at::TensorOptions(attn_mask_.dtype()) + .device(attn_mask_.device()))}, + /*dim=*/1); + } + if (key_padding_mask_.defined()) { + key_padding_mask_ = torch::cat( + {key_padding_mask_, + torch::zeros( + {key_padding_mask_.size(0), 1}, + at::TensorOptions(key_padding_mask_.dtype()) + .device(key_padding_mask_.device()))}, + /*dim=*/1); + } + } else { + TORCH_CHECK(!static_k.defined(), "bias cannot be added to static key."); + TORCH_CHECK(!static_v.defined(), "bias cannot be added to static value."); + } + } else { + TORCH_CHECK(!bias_k.defined()); + TORCH_CHECK(!bias_v.defined()); + } + q = 
q.contiguous().view({tgt_len, bsz * num_heads, head_dim}).transpose(0, 1); + if (k.defined()) { + k = k.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1); + } + if (v.defined()) { + v = v.contiguous().view({-1, bsz * num_heads, head_dim}).transpose(0, 1); + } + if (static_k.defined()) { + TORCH_CHECK(static_k.size(0) == bsz * num_heads); + TORCH_CHECK(static_k.size(2) == head_dim); + k = static_k; + } + if (static_v.defined()) { + TORCH_CHECK(static_v.size(0) == bsz * num_heads); + TORCH_CHECK(static_v.size(2) == head_dim); + v = static_v; + } + auto src_len = k.size(1); + if (key_padding_mask_.defined()) { + TORCH_CHECK(key_padding_mask_.size(0) == bsz); + TORCH_CHECK(key_padding_mask_.size(1) == src_len); + } + if (add_zero_attn) { + src_len += 1; + auto k_sizes = k.sizes().vec(); + k_sizes[1] = 1; + k = torch::cat( + {k, + torch::zeros( + k_sizes, at::TensorOptions(k.dtype()).device(k.device()))}, + /*dim=*/1); + auto v_sizes = v.sizes().vec(); + v_sizes[1] = 1; + v = torch::cat( + {v, + torch::zeros( + v_sizes, at::TensorOptions(v.dtype()).device(v.device()))}, + /*dim=*/1); + if (attn_mask_.defined()) { + attn_mask_ = torch::cat( + {attn_mask_, + torch::zeros( + {attn_mask_.size(0), 1}, + at::TensorOptions(attn_mask_.dtype()) + .device(attn_mask_.device()))}, + /*dim=*/1); + } + if (key_padding_mask_.defined()) { + key_padding_mask_ = torch::cat( + {key_padding_mask_, + torch::zeros( + {key_padding_mask_.size(0), 1}, + at::TensorOptions(key_padding_mask_.dtype()) + .device(key_padding_mask_.device()))}, + /*dim=*/1); + } + } + auto attn_output_weights = torch::bmm(q, k.transpose(1, 2)); + TORCH_CHECK( + attn_output_weights.sizes() == + IntArrayRef({bsz * num_heads, tgt_len, src_len})); + if (attn_mask_.defined()) { + attn_mask_ = attn_mask_.unsqueeze(0); + attn_output_weights += attn_mask_; + } + if (key_padding_mask_.defined()) { + attn_output_weights = + attn_output_weights.view({bsz, num_heads, tgt_len, src_len}); + attn_output_weights = 
AT_DISPATCH_FLOATING_TYPES( + attn_output_weights.scalar_type(), + "attn_output_weights.masked_fill", + [&]() { + return attn_output_weights.masked_fill( + key_padding_mask_.unsqueeze(1).unsqueeze(2), + -std::numeric_limits::infinity()); + }); + attn_output_weights = + attn_output_weights.view({bsz * num_heads, tgt_len, src_len}); + } + // NOLINTNEXTLINE(bugprone-argument-comment) + attn_output_weights = F::softmax(attn_output_weights, /*dim=*/-1); + attn_output_weights = F::dropout( + attn_output_weights, + F::DropoutFuncOptions().p(dropout_p).training(training)); + auto attn_output = torch::bmm(attn_output_weights, v); + TORCH_CHECK( + attn_output.sizes() == IntArrayRef({bsz * num_heads, tgt_len, head_dim})); + attn_output = + attn_output.transpose(0, 1).contiguous().view({tgt_len, bsz, embed_dim}); + attn_output = F::linear(attn_output, out_proj_weight, out_proj_bias); + if (need_weights) { + attn_output_weights = + attn_output_weights.view({bsz, num_heads, tgt_len, src_len}); + if (average_attn_weights) { + // average attention weights over heads + attn_output_weights = attn_output_weights.sum(/*dim=*/1) / num_heads; + } + return std::make_tuple(attn_output, attn_output_weights); + } else { + return std::make_tuple(attn_output, Tensor()); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +inline std::tuple multi_head_attention_forward( + const Tensor& query, + const Tensor& key, + const Tensor& value, + const MultiheadAttentionForwardFuncOptions& options) { + return detail::multi_head_attention_forward( + query, + key, + value, + options.embed_dim_to_check(), + options.num_heads(), + options.in_proj_weight(), + options.in_proj_bias(), + options.bias_k(), + options.bias_v(), + options.add_zero_attn(), + options.dropout_p(), + options.out_proj_weight(), + options.out_proj_bias(), + options.training(), + options.key_padding_mask(), + options.need_weights(), + options.attn_mask(), + options.use_separate_proj_weight(), + options.q_proj_weight(), 
+ options.k_proj_weight(), + options.v_proj_weight(), + options.static_k(), + options.static_v(), + options.average_attn_weights()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h new file mode 100644 index 0000000000000000000000000000000000000000..e7b7325157616a0f90ac70dc944ccdaede9cc2d7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/batchnorm.h @@ -0,0 +1,83 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor batch_norm( + const Tensor& input, + const Tensor& running_mean, + const Tensor& running_var, + Tensor weight, + Tensor bias, + bool training, + c10::optional momentum, + double eps) { + TORCH_CHECK( + input.dim() >= 2, + "Expected at least 2 input dimensions, but got ", + input.dim()); + if (training) { + auto size = input.sizes(); + int64_t size_prods = size[0]; + for (const auto i : c10::irange(size.size() - 2)) { + size_prods *= size[i + 2]; + } + TORCH_CHECK( + size_prods != 1, + "Expected more than 1 value per channel when training, got input size ", + size); + } + + return torch::batch_norm( + input, + weight, + bias, + running_mean, + running_var, + training, + momentum.value(), + eps, + at::globalContext().userEnabledCuDNN()); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.batch_norm +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::BatchNormFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::batch_norm(input, mean, variance, +/// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false)); +/// ``` +inline Tensor batch_norm( + const Tensor& input, + const Tensor& running_mean, + const Tensor& running_var, + const BatchNormFuncOptions& options = {}) { + return detail::batch_norm( + input, + running_mean, + running_var, + options.weight(), + options.bias(), + options.training(), + options.momentum(), + options.eps()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h new file mode 100644 index 0000000000000000000000000000000000000000..27914017fef22d93823a6b8c7330eab01a835706 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/distance.h @@ -0,0 +1,88 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor cosine_similarity( + const Tensor& x1, + const Tensor& x2, + int64_t dim, + double eps) { + return torch::cosine_similarity(x1, x2, dim, eps); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_similarity +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::CosineSimilarityFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cosine_similarity(input1, input2, +/// F::CosineSimilarityFuncOptions().dim(1)); +/// ``` +inline Tensor cosine_similarity( + const Tensor& x1, + const Tensor& x2, + const CosineSimilarityFuncOptions& options = {}) { + return detail::cosine_similarity(x1, x2, options.dim(), options.eps()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor pairwise_distance( + const Tensor& x1, + const Tensor& x2, + double p, + double eps, + bool keepdim) { + return torch::pairwise_distance(x1, x2, p, eps, keepdim); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pairwise_distance +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::PairwiseDistanceFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1)); +/// ``` +inline Tensor pairwise_distance( + const Tensor& x1, + const Tensor& x2, + const PairwiseDistanceFuncOptions& options = {}) { + return detail::pairwise_distance( + x1, x2, options.p(), options.eps(), options.keepdim()); +} + +// ============================================================================ + +/// Computes the p-norm distance between every pair of row vectors in the input. +/// This function will be faster if the rows are contiguous. 
+inline Tensor pdist(const Tensor& input, double p = 2.0) { + return torch::pdist(input, p); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h new file mode 100644 index 0000000000000000000000000000000000000000..cb233e5a01065509da1c91e8d1855bab5308b7ed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/dropout.h @@ -0,0 +1,234 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor dropout(Tensor input, double p, bool training, bool inplace) { + TORCH_CHECK( + p >= 0. && p <= 1., + "dropout probability has to be between 0 and 1, but got ", + p); + if (inplace) { + return torch::dropout_(input, p, training); + } else { + return torch::dropout(input, p, training); + } +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::DropoutFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout(input, F::DropoutFuncOptions().p(0.5)); +/// ``` +inline Tensor dropout(Tensor input, const DropoutFuncOptions& options = {}) { + return detail::dropout( + std::move(input), options.p(), options.training(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +template +inline Tensor _dropoutNd_helper( + Tensor input, + double p, + bool training, + bool inplace, + const char* fn_name) { + TORCH_CHECK( + p >= 0. && p <= 1., + "dropout probability has to be between 0 and 1, but got ", + p); + + auto inp_dim = input.dim(); + auto is_batched = inp_dim == batched_dim; + if (!is_batched) { + if (inplace) { + input = input.unsqueeze_(0); + } else { + input = input.unsqueeze(0); + } + } + + Tensor result; + if (inplace) { + result = torch::feature_dropout_(input, p, training); + } else { + result = torch::feature_dropout(input, p, training); + } + + if (!is_batched) { + if (inplace) { + result = result.squeeze_(0); + } else { + result = result.squeeze(0); + } + } + return result; +} + +inline Tensor dropout2d(Tensor input, double p, bool training, bool inplace) { + return _dropoutNd_helper<3, 4>( + std::move(input), p, training, inplace, "dropout2d"); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::Dropout2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5)); +/// ``` +inline Tensor dropout2d( + Tensor input, + const Dropout2dFuncOptions& options = {}) { + return detail::dropout2d( + std::move(input), options.p(), options.training(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor dropout3d(Tensor input, double p, bool training, bool inplace) { + return _dropoutNd_helper<4, 5>( + std::move(input), p, training, inplace, "dropout3d"); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::Dropout3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5)); +/// ``` +inline Tensor dropout3d( + Tensor input, + const Dropout3dFuncOptions& options = {}) { + return detail::dropout3d( + std::move(input), options.p(), options.training(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor alpha_dropout( + Tensor input, + double p, + bool training, + bool inplace) { + if (p < 0. || p > 1.) { + TORCH_CHECK( + false, "dropout probability has to be between 0 and 1, but got ", p); + } + return inplace ? 
torch::alpha_dropout_(input, p, training) + : torch::alpha_dropout(input, p, training); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.alpha_dropout +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::AlphaDropoutFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::alpha_dropout(input, +/// F::AlphaDropoutFuncOptions().p(0.5).training(false)); +/// ``` +inline Tensor alpha_dropout( + Tensor input, + const AlphaDropoutFuncOptions& options = {}) { + return detail::alpha_dropout( + std::move(input), options.p(), options.training(), options.inplace()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor feature_alpha_dropout( + Tensor input, + double p, + bool training, + bool inplace) { + if (p < 0. || p > 1.) { + TORCH_CHECK( + false, "dropout probability has to be between 0 and 1, but got ", p); + } + return inplace ? torch::feature_alpha_dropout_(input, p, training) + : torch::feature_alpha_dropout(input, p, training); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.feature_alpha_dropout +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::FeatureAlphaDropoutFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::feature_alpha_dropout(input, +/// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false)); +/// ``` +inline Tensor feature_alpha_dropout( + Tensor input, + const FeatureAlphaDropoutFuncOptions& options = {}) { + return detail::feature_alpha_dropout( + std::move(input), options.p(), options.training(), options.inplace()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h new file mode 100644 index 0000000000000000000000000000000000000000..37f373774e91357e2def5fc319f28c6da70a1411 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/embedding.h @@ -0,0 +1,211 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +inline Tensor one_hot(const Tensor& tensor, int64_t num_classes = -1) { + return torch::one_hot(tensor, num_classes); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline void _no_grad_embedding_renorm_( + Tensor weight, + const Tensor& input, + float max_norm, + float norm_type) { + torch::NoGradGuard no_grad; + torch::embedding_renorm_(weight, input, max_norm, norm_type); +} + +inline Tensor embedding( + const Tensor& input, + const Tensor& weight, + c10::optional padding_idx, + c10::optional max_norm, + double norm_type, + bool scale_grad_by_freq, + bool sparse) { + auto input_ = input; + + if (padding_idx != c10::nullopt) { + if (*padding_idx > 0) { + TORCH_CHECK( + *padding_idx < weight.size(0), + "Padding_idx must be within num_embeddings"); + } else if (*padding_idx < 0) { + TORCH_CHECK( + *padding_idx >= -weight.size(0), + "Padding_idx must be within num_embedding"); + padding_idx = weight.size(0) + 
*padding_idx; + } + } else { + padding_idx = -1; + } + + if (max_norm != c10::nullopt) { + input_ = input_.contiguous(); + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type); + } + return torch::embedding( + weight, input_, *padding_idx, scale_grad_by_freq, sparse); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::EmbeddingFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::embedding(input, weight, +/// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true)); +/// ``` +inline Tensor embedding( + const Tensor& input, + const Tensor& weight, + const EmbeddingFuncOptions& options = {}) { + return detail::embedding( + input, + weight, + options.padding_idx(), + options.max_norm(), + options.norm_type(), + options.scale_grad_by_freq(), + options.sparse()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor embedding_bag( + const Tensor& input, + const Tensor& weight, + const Tensor& offsets, + c10::optional max_norm, + double norm_type, + bool scale_grad_by_freq, + EmbeddingBagMode mode, + bool sparse, + const Tensor& per_sample_weights, + bool include_last_offset, + c10::optional padding_idx) { + auto input_ = input; + auto offsets_ = offsets; + auto per_sample_weights_ = per_sample_weights; + TORCH_CHECK( + !per_sample_weights_.defined() || + input_.sizes() == per_sample_weights_.sizes(), + "embedding_bag: If per_sample_weights (", + per_sample_weights_.sizes(), + ") is not null, then it must have the same shape as the input (", + input_.sizes(), + ")"); + if (input_.dim() == 2) { + 
TORCH_CHECK( + !offsets_.defined(), + "If input is 2D, then offsets has to be null, as input is treated is a mini-batch of fixed length sequences. However, found offsets of type Tensor"); + offsets_ = torch::arange( + 0, + input_.numel(), + input_.size(1), + torch::TensorOptions().dtype(torch::kLong).device(input_.device())); + input_ = input_.reshape(-1); + if (per_sample_weights_.defined()) { + per_sample_weights_ = per_sample_weights_.reshape(-1); + } + } else if (input_.dim() == 1) { + TORCH_CHECK( + offsets_.defined(), "offsets has to be a 1D Tensor but got null"); + TORCH_CHECK(offsets_.dim() == 1, "offsets has to be a 1D Tensor"); + } else { + TORCH_CHECK( + false, + "input has to be 1D or 2D Tensor, but got Tensor of dimension ", + input_.dim()); + } + + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int mode_enum; + if (std::holds_alternative(mode)) { + mode_enum = 0; + } else if (std::holds_alternative(mode)) { + mode_enum = 1; + } else if (std::holds_alternative(mode)) { + mode_enum = 2; + TORCH_CHECK( + !scale_grad_by_freq, + "max mode does not support scaling the gradient by the frequency"); + TORCH_CHECK(!sparse, "max mode does not support sparse weights"); + } else { + TORCH_CHECK(false, "mode has to be one of sum, mean or max"); + } + + if (max_norm != c10::nullopt) { + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type); + } + + TORCH_CHECK( + !per_sample_weights_.defined() || std::get_if(&mode), + "embedding_bag: per_sample_weights was not null. 
", + "per_sample_weights is only supported for mode='kSum' (got mode='", + torch::enumtype::get_enum_name(mode), + "').Please open a feature request on GitHub."); + + return std::get<0>(torch::embedding_bag( + weight, + input_, + offsets_, + scale_grad_by_freq, + mode_enum, + sparse, + per_sample_weights_, + include_last_offset, + padding_idx)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding_bag +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::EmbeddingBagFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::embedding_bag(input, weight, +/// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets)); +/// ``` +inline Tensor embedding_bag( + const Tensor& input, + const Tensor& weight, + const EmbeddingBagFuncOptions& options = {}) { + return detail::embedding_bag( + input, + weight, + options.offsets(), + options.max_norm(), + options.norm_type(), + options.scale_grad_by_freq(), + options.mode(), + options.sparse(), + options.per_sample_weights(), + options.include_last_offset(), + options.padding_idx()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h new file mode 100644 index 0000000000000000000000000000000000000000..cd47138e32e9e63ffd79a53cf5ea4a22cf411140 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/fold.h @@ -0,0 +1,102 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS 
+namespace detail { +inline Tensor fold( + const Tensor& input, + ExpandingArray<2> output_size, + ExpandingArray<2> kernel_size, + ExpandingArray<2> dilation, + ExpandingArray<2> padding, + ExpandingArray<2> stride) { + if (input.dim() == 3 || input.dim() == 2) { + return torch::col2im( + input, output_size, kernel_size, dilation, padding, stride); + } else { + TORCH_CHECK( + false, + "Input Error: Only unbatched (2D) or batched (3D) input Tensors are supported " + "(got ", + input.dim(), + "D)"); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.fold +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::FoldFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2})); +/// ``` +inline Tensor fold(const Tensor& input, const FoldFuncOptions& options) { + return detail::fold( + input, + options.output_size(), + options.kernel_size(), + options.dilation(), + options.padding(), + options.stride()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor unfold( + const Tensor& input, + ExpandingArray<2> kernel_size, + ExpandingArray<2> dilation, + ExpandingArray<2> padding, + ExpandingArray<2> stride) { + if (input.dim() == 4) { + return torch::im2col(input, kernel_size, dilation, padding, stride); + } else { + TORCH_CHECK( + false, + "Input Error: Only 4D input Tensors are supported " + "(got ", + input.dim(), + "D)"); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.unfold +/// about the exact behavior of this functional. 
+/// +/// See the documentation for `torch::nn::functional::UnfoldFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2)); +/// ``` +inline Tensor unfold(const Tensor& input, const UnfoldFuncOptions& options) { + return detail::unfold( + input, + options.kernel_size(), + options.dilation(), + options.padding(), + options.stride()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h new file mode 100644 index 0000000000000000000000000000000000000000..bfa42a32f7940ecad7111e432bd1c1ce8b6a48eb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/instancenorm.h @@ -0,0 +1,63 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor instance_norm( + const Tensor& input, + const Tensor& running_mean, + const Tensor& running_var, + const Tensor& weight, + const Tensor& bias, + bool use_input_stats, + double momentum, + double eps) { + return torch::instance_norm( + input, + weight, + bias, + running_mean, + running_var, + use_input_stats, + momentum, + eps, + at::globalContext().userEnabledCuDNN()); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.instance_norm +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::InstanceNormFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::instance_norm(input, +/// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5)); +/// ``` +inline Tensor instance_norm( + const Tensor& input, + const InstanceNormFuncOptions& options = {}) { + return detail::instance_norm( + input, + options.running_mean(), + options.running_var(), + options.weight(), + options.bias(), + options.use_input_stats(), + options.momentum(), + options.eps()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h new file mode 100644 index 0000000000000000000000000000000000000000..ffeafcd712af0421a23fe6f344f032a35e40c674 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/linear.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +inline Tensor bilinear( + const Tensor& input1, + const Tensor& input2, + const Tensor& weight, + const Tensor& bias = Tensor()) { + return torch::bilinear(input1, input2, weight, bias); +} + +// ============================================================================ + +inline Tensor linear( + const Tensor& input, + const Tensor& weight, + const Tensor& bias = {}) { + if (input.dim() == 2 && bias.defined()) { + // fused op is marginally faster + return torch::addmm(bias, input, weight.t()); + } else { + auto output = input.matmul(weight.t()); + if (bias.defined()) { + output += bias; + } + return output; + } +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h new file mode 100644 index 0000000000000000000000000000000000000000..691ba4ce3041e351239d2a5c2cd16ea614f57319 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/loss.h @@ -0,0 +1,1044 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor l1_loss( + const Tensor& input, + const Tensor& target, + L1LossFuncOptions::reduction_t reduction) { + return torch::l1_loss(input, target, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.l1_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::L1LossFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone)); +/// ``` +inline Tensor l1_loss( + const Tensor& input, + const Tensor& target, + const L1LossFuncOptions& options = {}) { + return detail::l1_loss(input, target, options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor kl_div( + const Tensor& input, + const Tensor& target, + KLDivFuncOptions::reduction_t reduction, + bool log_target = false) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + torch::Reduction::Reduction reduction_enum; + + if (std::holds_alternative(reduction)) { + TORCH_WARN( + "reduction: 'mean' divides the total loss by both the batch size and the support size." 
+ "'batchmean' divides only by the batch size, and aligns with the KL div math definition." + "'mean' will be changed to behave the same as 'batchmean' in the next major release."); + } + + // special case for batchmean + if (std::holds_alternative(reduction)) { + reduction_enum = torch::Reduction::Sum; + } else { + reduction_enum = enumtype::reduction_get_enum(reduction); + } + + auto reduced = torch::kl_div(input, target, reduction_enum, log_target); + + if (std::holds_alternative(reduction) && + input.dim() != 0) { + reduced = reduced / input.sizes()[0]; + } + + return reduced; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.kl_div +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::KLDivFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::kl_div(input, target, +/// F::KLDivFuncOptions.reduction(torch::kNone).log_target(false)); +/// ``` +inline Tensor kl_div( + const Tensor& input, + const Tensor& target, + const KLDivFuncOptions& options = {}) { + return detail::kl_div( + input, target, options.reduction(), options.log_target()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor mse_loss( + const Tensor& input, + const Tensor& target, + MSELossFuncOptions::reduction_t reduction) { + if (!(target.sizes() == input.sizes())) { + TORCH_WARN( + "Using a target size (", + target.sizes(), + ") that is different to the input size (", + input.sizes(), + "). ", + "This will likely lead to incorrect results due to broadcasting. 
", + "Please ensure they have the same size."); + } + std::vector broadcast_tensors = + torch::broadcast_tensors({input, target}); + auto expanded_input = broadcast_tensors[0]; + auto expanded_target = broadcast_tensors[1]; + return torch::mse_loss( + expanded_input, expanded_target, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.mse_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MSELossFuncOptions` class +/// to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone)); +/// ``` +inline Tensor mse_loss( + const Tensor& input, + const Tensor& target, + const MSELossFuncOptions& options = {}) { + return detail::mse_loss(input, target, options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor binary_cross_entropy( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + BinaryCrossEntropyFuncOptions::reduction_t reduction) { + auto reduction_enum = enumtype::reduction_get_enum(reduction); + + if (target.sizes() != input.sizes()) { + TORCH_CHECK( + false, + "Using a target size (", + target.sizes(), + ") ", + "that is different to the input size (", + input.sizes(), + ") is deprecated. 
", + "Please ensure they have the same size."); + } + + auto weight_ = weight; + if (weight_.defined()) { + auto new_size = at::infer_size(target.sizes(), weight_.sizes()); + weight_ = weight_.expand(new_size); + } + + return torch::binary_cross_entropy(input, target, weight_, reduction_enum); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::BinaryCrossEntropyFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy(input, target, +/// F::BinaryCrossEntropyFuncOptions().weight(weight)); +/// ``` +inline Tensor binary_cross_entropy( + const Tensor& input, + const Tensor& target, + const BinaryCrossEntropyFuncOptions& options = {}) { + return detail::binary_cross_entropy( + input, target, options.weight(), options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor hinge_embedding_loss( + const Tensor& input, + const Tensor& target, + double margin, + HingeEmbeddingLossFuncOptions::reduction_t reduction) { + return torch::hinge_embedding_loss( + input, target, margin, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hinge_embedding_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::HingeEmbeddingLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hinge_embedding_loss(input, target, +/// F::HingeEmbeddingLossFuncOptions().margin(2)); +/// ``` +inline Tensor hinge_embedding_loss( + const Tensor& input, + const Tensor& target, + const HingeEmbeddingLossFuncOptions& options = {}) { + return detail::hinge_embedding_loss( + input, target, options.margin(), options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor multi_margin_loss( + const Tensor& input, + const Tensor& target, + int64_t p, + double margin, + const Tensor& weight, + MultiMarginLossFuncOptions::reduction_t reduction) { + TORCH_CHECK(p == 1 || p == 2, "only p == 1 and p == 2 supported"); + if (weight.defined()) { + TORCH_CHECK(weight.dim() == 1, "weight must be one-dimensional"); + } + + return torch::multi_margin_loss( + input, + target, + p, + margin, + weight, + enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multi_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::MultiMarginLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multi_margin_loss(input, target, +/// F::MultiMarginLossFuncOptions().margin(2).weight(weight)); +/// ``` +inline Tensor multi_margin_loss( + const Tensor& input, + const Tensor& target, + const MultiMarginLossFuncOptions& options = {}) { + return detail::multi_margin_loss( + input, + target, + options.p(), + options.margin(), + options.weight(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor cosine_embedding_loss( + const Tensor& input1, + const Tensor& input2, + const Tensor& target, + double margin, + CosineEmbeddingLossFuncOptions::reduction_t reduction) { + return torch::cosine_embedding_loss( + input1, input2, target, margin, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_embedding_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::CosineEmbeddingLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cosine_embedding_loss(input1, input2, target, +/// F::CosineEmbeddingLossFuncOptions().margin(0.5)); +/// ``` +inline Tensor cosine_embedding_loss( + const Tensor& input1, + const Tensor& input2, + const Tensor& target, + const CosineEmbeddingLossFuncOptions& options = {}) { + return detail::cosine_embedding_loss( + input1, input2, target, options.margin(), options.reduction()); +} + +// ============================================================================ + +inline Tensor _smooth_l1_loss( + const Tensor& input, + const Tensor& target, + double beta = 1.) 
{ + auto t = torch::abs(input - target); + return torch::where(t < beta, 0.5 * torch::pow(t, 2) / beta, t - 0.5 * beta); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor smooth_l1_loss( + const Tensor& input, + const Tensor& target, + SmoothL1LossFuncOptions::reduction_t reduction, + c10::optional beta_opt = c10::nullopt) { + if (target.sizes() != input.sizes()) { + TORCH_WARN( + "Using a target size (", + target.sizes(), + ") that is different to the input size (", + input.sizes(), + "). ", + "This will likely lead to incorrect results due to broadcasting. ", + "Please ensure they have the same size."); + } + double beta = beta_opt.value_or(1.0); + + std::vector expanded_tensors = + torch::broadcast_tensors({input, target}); + return torch::smooth_l1_loss( + expanded_tensors[0], + expanded_tensors[1], + enumtype::reduction_get_enum(reduction), + beta); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SmoothL1LossFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone)); +/// ``` +inline Tensor smooth_l1_loss( + const Tensor& input, + const Tensor& target, + const SmoothL1LossFuncOptions& options = {}) { + return detail::smooth_l1_loss( + input, target, options.reduction(), options.beta()); +} + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss +/// about the exact behavior of this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::smooth_l1_loss(input, target, /*options=*/torch::kNone, /*beta=*/0.5); +/// ``` +inline Tensor smooth_l1_loss( + const Tensor& input, + const Tensor& target, + const SmoothL1LossFuncOptions& options, + double beta) { + TORCH_CHECK( + options.beta() == c10::nullopt, + "expected beta not to be provided in 'options', but got ", + options.beta().value()); + return detail::smooth_l1_loss(input, target, options.reduction(), beta); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor huber_loss( + const Tensor& input, + const Tensor& target, + HuberLossFuncOptions::reduction_t reduction, + double delta = 1.) { + if (target.sizes() != input.sizes()) { + TORCH_WARN( + "Using a target size (", + target.sizes(), + ") that is different to the input size (", + input.sizes(), + "). ", + "This will likely lead to incorrect results due to broadcasting. ", + "Please ensure they have the same size."); + } + + std::vector expanded_tensors = + torch::broadcast_tensors({input, target}); + return torch::huber_loss( + expanded_tensors[0], + expanded_tensors[1], + enumtype::reduction_get_enum(reduction), + delta); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.huber_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::HuberLossFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::huber_loss(input, target, +/// F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5)); +/// ``` +inline Tensor huber_loss( + const Tensor& input, + const Tensor& target, + const HuberLossFuncOptions& options = {}) { + return detail::huber_loss( + input, target, options.reduction(), options.delta()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor multilabel_margin_loss( + const Tensor& input, + const Tensor& target, + MultilabelMarginLossFuncOptions::reduction_t reduction) { + return torch::multilabel_margin_loss( + input, target, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::MultilabelMarginLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_margin_loss(input, target, +/// F::MultilabelMarginLossFuncOptions(torch::kNone)); +/// ``` +inline Tensor multilabel_margin_loss( + const Tensor& input, + const Tensor& target, + const MultilabelMarginLossFuncOptions& options = {}) { + return detail::multilabel_margin_loss(input, target, options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor soft_margin_loss( + const Tensor& input, + const Tensor& target, + SoftMarginLossFuncOptions::reduction_t reduction) { + return torch::soft_margin_loss( + input, target, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.soft_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::SoftMarginLossFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::soft_margin_loss(input, target, +/// F::SoftMarginLossFuncOptions(torch::kNone)); +/// ``` +inline Tensor soft_margin_loss( + const Tensor& input, + const Tensor& target, + const SoftMarginLossFuncOptions& options = {}) { + return detail::soft_margin_loss(input, target, options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor multilabel_soft_margin_loss( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + MultilabelSoftMarginLossFuncOptions::reduction_t reduction) { + auto loss = + -(target * torch::log_sigmoid(input) + + (1 - target) * torch::log_sigmoid(-input)); + if (weight.defined()) { + loss = loss * weight; + } + + auto class_dim = input.dim() - 1; + auto C = input.size(class_dim); + loss = loss.sum(class_dim) / C; // only return N loss values + + Tensor ret; + + if (std::holds_alternative(reduction)) { + ret = loss; + } else if (std::holds_alternative(reduction)) { + ret = loss.mean(); + } else if (std::holds_alternative(reduction)) { + ret = loss.sum(); + } else { + ret = input; + TORCH_INTERNAL_ASSERT( + false, enumtype::get_enum_name(reduction), " is not valid"); + } + return ret; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_soft_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::MultilabelSoftMarginLossFuncOptions` class to learn +/// what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_soft_margin_loss(input, target, +/// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight)); +/// ``` +inline Tensor multilabel_soft_margin_loss( + const Tensor& input, + const Tensor& target, + const MultilabelSoftMarginLossFuncOptions& options = {}) { + return detail::multilabel_soft_margin_loss( + input, target, options.weight(), options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor triplet_margin_loss( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative, + double margin, + double p, + double eps, + bool swap, + TripletMarginLossFuncOptions::reduction_t reduction) { + return torch::triplet_margin_loss( + anchor, + positive, + negative, + margin, + p, + eps, + swap, + enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::TripletMarginLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_loss(anchor, positive, negative, +/// F::TripletMarginLossFuncOptions().margin(1.0)); +/// ``` +inline Tensor triplet_margin_loss( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative, + const TripletMarginLossFuncOptions& options = {}) { + return detail::triplet_margin_loss( + anchor, + positive, + negative, + options.margin(), + options.p(), + options.eps(), + options.swap(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor triplet_margin_with_distance_loss( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative, + c10::optional + distance_function, + double margin, + bool swap, + TripletMarginWithDistanceLossFuncOptions::reduction_t reduction) { + Tensor dist_pos, dist_neg; + if (distance_function.has_value()) { + auto distance_function_impl = distance_function.value(); + dist_pos = distance_function_impl(anchor, positive); + dist_neg = distance_function_impl(anchor, negative); + } else { + dist_pos = pairwise_distance(anchor, positive); + dist_neg = pairwise_distance(anchor, negative); + } + + if (swap) { + Tensor dist_swap; + if (distance_function.has_value()) { + dist_swap = distance_function.value()(positive, negative); + } else { + dist_swap = pairwise_distance(positive, negative); + } + dist_neg = torch::min(dist_neg, dist_swap); + } + + auto loss = torch::clamp_min(dist_pos - dist_neg + margin, 0); + + Tensor ret; + if (std::holds_alternative(reduction)) { + ret = loss; + } else if (std::holds_alternative(reduction)) { + ret = loss.mean(); + } else if (std::holds_alternative(reduction)) { + ret = loss.sum(); + } else { + ret = anchor; + TORCH_INTERNAL_ASSERT( + false, enumtype::get_enum_name(reduction), " is not valid"); + } + return ret; +} +} // namespace detail +#endif /* 
DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_with_distance_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::TripletMarginWithDistanceLossFuncOptions` class to +/// learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_with_distance_loss(anchor, positive, negative, +/// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0)); +/// ``` +inline Tensor triplet_margin_with_distance_loss( + const Tensor& anchor, + const Tensor& positive, + const Tensor& negative, + const TripletMarginWithDistanceLossFuncOptions& options = {}) { + return detail::triplet_margin_with_distance_loss( + anchor, + positive, + negative, + options.distance_function(), + options.margin(), + options.swap(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor ctc_loss( + const Tensor& log_probs, + const Tensor& targets, + const Tensor& input_lengths, + const Tensor& target_lengths, + int64_t blank, + CTCLossFuncOptions::reduction_t reduction, + bool zero_infinity) { + return torch::ctc_loss( + log_probs, + targets, + input_lengths, + target_lengths, + blank, + enumtype::reduction_get_enum(reduction), + zero_infinity); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.ctc_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::CTCLossFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::ctc_loss(log_probs, targets, input_lengths, target_lengths, +/// F::CTCLossFuncOptions().reduction(torch::kNone)); +/// ``` +inline Tensor ctc_loss( + const Tensor& log_probs, + const Tensor& targets, + const Tensor& input_lengths, + const Tensor& target_lengths, + const CTCLossFuncOptions& options = {}) { + return detail::ctc_loss( + log_probs, + targets, + input_lengths, + target_lengths, + options.blank(), + options.reduction(), + options.zero_infinity()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor poisson_nll_loss( + const Tensor& input, + const Tensor& target, + bool log_input, + bool full, + double eps, + PoissonNLLLossFuncOptions::reduction_t reduction) { + return torch::poisson_nll_loss( + input, + target, + log_input, + full, + eps, + enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.poisson_nll_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::PoissonNLLLossFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::poisson_nll_loss(input, target, +/// F::PoissonNLLLossFuncOptions().reduction(torch::kNone)); +/// ``` +inline Tensor poisson_nll_loss( + const Tensor& input, + const Tensor& target, + const PoissonNLLLossFuncOptions& options = {}) { + return detail::poisson_nll_loss( + input, + target, + options.log_input(), + options.full(), + options.eps(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor margin_ranking_loss( + const Tensor& input1, + const Tensor& input2, + const Tensor& target, + double margin, + MarginRankingLossFuncOptions::reduction_t reduction) { + TORCH_CHECK( + input1.dim() == input2.dim() && input1.dim() == target.dim(), + "margin_ranking_loss : All input tensors should have same dimension but got sizes: " + "input1: ", + input1.sizes(), + ", input2: ", + input2.sizes(), + ", target: ", + target.sizes()); + return torch::margin_ranking_loss( + input1, input2, target, margin, enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.margin_ranking_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::MarginRankingLossFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::margin_ranking_loss(input1, input2, target, +/// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum)); +/// ``` +inline Tensor margin_ranking_loss( + const Tensor& input1, + const Tensor& input2, + const Tensor& target, + const MarginRankingLossFuncOptions& options = {}) { + return detail::margin_ranking_loss( + input1, input2, target, options.margin(), options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor nll_loss( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + int64_t ignore_index, + const NLLLossFuncOptions::reduction_t reduction) { + if (input.dim() < 2) { + TORCH_CHECK(false, "Expected 2 or more dimensions (got ", input.dim(), ")"); + } + + if (input.sizes()[0] != target.sizes()[0]) { + TORCH_CHECK( + false, + "Expected input batch_size (", + input.sizes()[0], + ") to match target batch_size (", + target.sizes()[0], + ")."); + } + + return torch::nll_loss_nd( + input, + target, + weight, + enumtype::reduction_get_enum(reduction), + ignore_index); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.nll_loss +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::NLLLossFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::nll_loss(input, target, +/// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +inline Tensor nll_loss( + const Tensor& input, + const Tensor& target, + const NLLLossFuncOptions& options = {}) { + return detail::nll_loss( + input, + target, + options.weight(), + options.ignore_index(), + options.reduction()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor cross_entropy( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + int64_t ignore_index, + CrossEntropyFuncOptions::reduction_t reduction, + double label_smoothing) { + return torch::cross_entropy_loss( + input, + target, + weight, + enumtype::reduction_get_enum(reduction), + ignore_index, + label_smoothing); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cross_entropy +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::CrossEntropyFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cross_entropy(input, target, +/// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +inline Tensor cross_entropy( + const Tensor& input, + const Tensor& target, + const CrossEntropyFuncOptions& options = {}) { + return detail::cross_entropy( + input, + target, + options.weight(), + options.ignore_index(), + options.reduction(), + options.label_smoothing()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor binary_cross_entropy_with_logits( + const Tensor& input, + const Tensor& target, + const Tensor& weight, + BinaryCrossEntropyWithLogitsFuncOptions::reduction_t reduction, + const Tensor& pos_weight) { + TORCH_CHECK( + target.sizes() == input.sizes(), + "Target size (", + target.sizes(), + ") must be the same as input size (", + input.sizes(), + ")"); + + return torch::binary_cross_entropy_with_logits( + input, + target, + weight, + pos_weight, + enumtype::reduction_get_enum(reduction)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy_with_logits +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions` class to +/// learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy_with_logits(input, target, +/// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum)); +/// ``` +inline Tensor binary_cross_entropy_with_logits( + const Tensor& input, + const Tensor& target, + const BinaryCrossEntropyWithLogitsFuncOptions& options = {}) { + return detail::binary_cross_entropy_with_logits( + input, + target, + options.weight(), + options.reduction(), + options.pos_weight()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h new file mode 100644 index 0000000000000000000000000000000000000000..80e1a933dfb3c7e95f6087b472eda3e34ec93691 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/normalization.h @@ -0,0 +1,211 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor normalize( + const Tensor& input, + double p, + int64_t dim, + double eps, + c10::optional out) { + if (out == c10::nullopt) { + auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); + return input / denom; + } else { + auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); + return torch::div_out(*out, input, denom); + } +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.normalize +/// about the exact behavior of this functional. 
+/// +/// See the documentation for `torch::nn::functional::NormalizeFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1)); +/// ``` +inline Tensor normalize( + const Tensor& input, + NormalizeFuncOptions options = {}) { + return detail::normalize( + input, options.p(), options.dim(), options.eps(), options.out()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor layer_norm( + const Tensor& input, + const std::vector& normalized_shape, + const Tensor& weight, + const Tensor& bias, + double eps) { + return torch::layer_norm(input, normalized_shape, weight, bias, eps); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.layer_norm +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LayerNormFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5)); +/// ``` +inline Tensor layer_norm( + const Tensor& input, + const LayerNormFuncOptions& options) { + return detail::layer_norm( + input, + options.normalized_shape(), + options.weight(), + options.bias(), + options.eps()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor local_response_norm( + const Tensor& input, + int64_t size, + double alpha, + double beta, + double k) { + auto dim = input.dim(); + TORCH_CHECK( + dim >= 3, + "Expected 3D or higher dimensionality input (got ", + dim, + " dimensions)"); + auto div = input.mul(input).unsqueeze(1); + if (dim == 3) { + div = detail::pad( + div, + /*pad=*/{0, 0, size / 2, (size - 1) / 2}, + /*mode=*/torch::kConstant, + /*value=*/0); + div = detail::avg_pool2d( + div, + /*kernel_size=*/{size, 1}, + /*stride=*/1, + /*padding=*/0, + /*ceil_mode=*/false, + /*count_include_pad=*/true, + /*divisor_override=*/c10::nullopt) + .squeeze(1); + } else { + auto sizes = input.sizes(); + div = div.view({sizes[0], 1, sizes[1], sizes[2], -1}); + div = detail::pad( + div, + /*pad=*/{0, 0, 0, 0, size / 2, (size - 1) / 2}, + /*mode=*/torch::kConstant, + /*value=*/0); + div = detail::avg_pool3d( + div, + /*kernel_size=*/{size, 1, 1}, + /*stride=*/1, + /*padding=*/0, + /*ceil_mode=*/false, + /*count_include_pad=*/true, + /*divisor_override=*/c10::nullopt) + .squeeze(1); + div = div.view(sizes); + } + div = div.mul(alpha).add(k).pow(beta); + return input / div; +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.local_response_norm +/// about the exact behavior of this functional. 
+/// +/// See the documentation for +/// `torch::nn::functional::LocalResponseNormFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2)); +/// ``` +inline Tensor local_response_norm( + const Tensor& input, + const LocalResponseNormFuncOptions& options) { + return detail::local_response_norm( + input, options.size(), options.alpha(), options.beta(), options.k()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor group_norm( + const Tensor& input, + int64_t num_groups, + const Tensor& weight, + const Tensor& bias, + double eps) { + return torch::group_norm( + input, + num_groups, + weight, + bias, + eps, + at::globalContext().userEnabledCuDNN()); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.group_norm +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::GroupNormFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5)); +/// ``` +inline Tensor group_norm( + const Tensor& input, + const GroupNormFuncOptions& options) { + return detail::group_norm( + input, + options.num_groups(), + options.weight(), + options.bias(), + options.eps()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h new file mode 100644 index 0000000000000000000000000000000000000000..6b962cf814b105562b3d926e0a41c6a7e05658b1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pixelshuffle.h @@ -0,0 +1,47 @@ +#pragma once + +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor pixel_shuffle(const Tensor& input, int64_t upscale_factor) { + return torch::pixel_shuffle(input, upscale_factor); +} + +inline Tensor pixel_unshuffle(const Tensor& input, int64_t downscale_factor) { + return torch::pixel_unshuffle(input, downscale_factor); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pixel_shuffle +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::PixelShuffleFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2)); +/// ``` +inline Tensor pixel_shuffle( + const Tensor& input, + const PixelShuffleFuncOptions& options) { + return detail::pixel_shuffle(input, options.upscale_factor()); +} + +inline Tensor pixel_unshuffle( + const Tensor& input, + const PixelUnshuffleFuncOptions& options) { + return detail::pixel_unshuffle(input, options.downscale_factor()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h new file mode 100644 index 0000000000000000000000000000000000000000..a4e130eef8d8eb37fb0dce4e87c8ee8c57f26d47 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/pooling.h @@ -0,0 +1,1098 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor avg_pool1d( + const Tensor& input, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + ExpandingArray<1> padding, + bool ceil_mode, + bool count_include_pad) { + return torch::avg_pool1d( + input, kernel_size, stride, padding, ceil_mode, count_include_pad); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::AvgPool1dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2)); +/// ``` +inline Tensor avg_pool1d( + const Tensor& input, + const AvgPool1dFuncOptions& options) { + return avg_pool1d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.ceil_mode(), + options.count_include_pad()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor avg_pool2d( + const Tensor& input, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + ExpandingArray<2> padding, + bool ceil_mode, + bool count_include_pad, + c10::optional divisor_override) { + return torch::avg_pool2d( + input, + kernel_size, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::AvgPool2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2)); +/// ``` +inline Tensor avg_pool2d( + const Tensor& input, + const AvgPool2dFuncOptions& options) { + return detail::avg_pool2d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.ceil_mode(), + options.count_include_pad(), + options.divisor_override()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor avg_pool3d( + const Tensor& input, + ExpandingArray<3> kernel_size, + ExpandingArray<3> stride, + ExpandingArray<3> padding, + bool ceil_mode, + bool count_include_pad, + c10::optional divisor_override) { + return torch::avg_pool3d( + input, + kernel_size, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::AvgPool3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2)); +/// ``` +inline Tensor avg_pool3d( + const Tensor& input, + const AvgPool3dFuncOptions& options) { + return detail::avg_pool3d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.ceil_mode(), + options.count_include_pad(), + options.divisor_override()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_pool1d( + const Tensor& input, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + ExpandingArray<1> padding, + ExpandingArray<1> dilation, + bool ceil_mode) { + return torch::max_pool1d( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2)); +/// ``` +inline Tensor max_pool1d( + const Tensor& input, + const MaxPool1dFuncOptions& options) { + return detail::max_pool1d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple max_pool1d_with_indices( + const Tensor& input, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + ExpandingArray<1> padding, + ExpandingArray<1> dilation, + bool ceil_mode) { + return torch::max_pool1d_with_indices( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool1d_with_indices(x, F::MaxPool1dFuncOptions(3).stride(2)); +/// ``` +inline std::tuple max_pool1d_with_indices( + const Tensor& input, + const MaxPool1dFuncOptions& options) { + return detail::max_pool1d_with_indices( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_pool2d( + const Tensor& input, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + ExpandingArray<2> padding, + ExpandingArray<2> dilation, + bool ceil_mode) { + return torch::max_pool2d( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool2d +/// about the exact behavior of this functional. 
+/// +/// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2)); +/// ``` +inline Tensor max_pool2d( + const Tensor& input, + const MaxPool2dFuncOptions& options) { + return detail::max_pool2d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple max_pool2d_with_indices( + const Tensor& input, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + ExpandingArray<2> padding, + ExpandingArray<2> dilation, + bool ceil_mode) { + return torch::max_pool2d_with_indices( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(3).stride(2)); +/// ``` +inline std::tuple max_pool2d_with_indices( + const Tensor& input, + const MaxPool2dFuncOptions& options) { + return detail::max_pool2d_with_indices( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_pool3d( + const Tensor& input, + ExpandingArray<3> kernel_size, + ExpandingArray<3> stride, + ExpandingArray<3> padding, + ExpandingArray<3> dilation, + bool ceil_mode) { + return torch::max_pool3d( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2)); +/// ``` +inline Tensor max_pool3d( + const Tensor& input, + const MaxPool3dFuncOptions& options) { + return detail::max_pool3d( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple max_pool3d_with_indices( + const Tensor& input, + ExpandingArray<3> kernel_size, + ExpandingArray<3> stride, + ExpandingArray<3> padding, + ExpandingArray<3> dilation, + bool ceil_mode) { + return torch::max_pool3d_with_indices( + input, kernel_size, stride, padding, dilation, ceil_mode); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool3d_with_indices(x, F::MaxPool3dFuncOptions(3).stride(2)); +/// ``` +inline std::tuple max_pool3d_with_indices( + const Tensor& input, + const MaxPool3dFuncOptions& options) { + return detail::max_pool3d_with_indices( + input, + options.kernel_size(), + options.stride(), + options.padding(), + options.dilation(), + options.ceil_mode()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple adaptive_max_pool1d_with_indices( + const Tensor& input, + ExpandingArray<1> output_size) { + return torch::adaptive_max_pool1d(input, output_size); +} +} // namespace detail + +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool1d_with_indices(x, F::AdaptiveMaxPool1dFuncOptions(3)); +/// ``` +inline std::tuple adaptive_max_pool1d_with_indices( + const Tensor& input, + const AdaptiveMaxPool1dFuncOptions& options) { + return detail::adaptive_max_pool1d_with_indices(input, options.output_size()); +} + +namespace detail { +inline Tensor adaptive_max_pool1d( + const Tensor& input, + ExpandingArray<1> output_size) { + return std::get<0>(adaptive_max_pool1d_with_indices(input, output_size)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3)); +/// ``` +inline Tensor adaptive_max_pool1d( + const Tensor& input, + const AdaptiveMaxPool1dFuncOptions& options) { + return detail::adaptive_max_pool1d(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple adaptive_max_pool2d_with_indices( + const Tensor& input, + ExpandingArrayWithOptionalElem<2> output_size) { + auto output_size_ = + torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); + return torch::adaptive_max_pool2d(input, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool2d_with_indices(x, F::AdaptiveMaxPool2dFuncOptions(3)); +/// ``` +inline std::tuple adaptive_max_pool2d_with_indices( + const Tensor& input, + const AdaptiveMaxPool2dFuncOptions& options) { + return detail::adaptive_max_pool2d_with_indices(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_max_pool2d( + const Tensor& input, + ExpandingArrayWithOptionalElem<2> output_size) { + return std::get<0>(adaptive_max_pool2d_with_indices(input, output_size)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3)); +/// ``` +inline Tensor adaptive_max_pool2d( + const Tensor& input, + const AdaptiveMaxPool2dFuncOptions& options) { + return detail::adaptive_max_pool2d(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple adaptive_max_pool3d_with_indices( + const Tensor& input, + ExpandingArrayWithOptionalElem<3> output_size) { + auto output_size_ = + torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); + return torch::adaptive_max_pool3d(input, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool3d_with_indices(x, F::AdaptiveMaxPool3dFuncOptions(3)); +/// ``` +inline std::tuple adaptive_max_pool3d_with_indices( + const Tensor& input, + const AdaptiveMaxPool3dFuncOptions& options) { + return detail::adaptive_max_pool3d_with_indices(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_max_pool3d( + const Tensor& input, + ExpandingArrayWithOptionalElem<3> output_size) { + return std::get<0>(adaptive_max_pool3d_with_indices(input, output_size)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3)); +/// ``` +inline Tensor adaptive_max_pool3d( + const Tensor& input, + const AdaptiveMaxPool3dFuncOptions& options) { + return detail::adaptive_max_pool3d(input, options.output_size()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_avg_pool1d( + const Tensor& input, + ExpandingArray<1> output_size) { + return torch::adaptive_avg_pool1d(input, output_size); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool1d +/// about the exact behavior of this functional. 
+/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveAvgPool1dFuncOptions` class to learn what +/// optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3)); +/// ``` +inline Tensor adaptive_avg_pool1d( + const Tensor& input, + const AdaptiveAvgPool1dFuncOptions& options) { + return detail::adaptive_avg_pool1d(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_avg_pool2d( + const Tensor& input, + ExpandingArrayWithOptionalElem<2> output_size) { + auto output_size_ = + torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); + return torch::adaptive_avg_pool2d(input, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveAvgPool2dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3)); +/// ``` +inline Tensor adaptive_avg_pool2d( + const Tensor& input, + const AdaptiveAvgPool2dFuncOptions& options) { + return detail::adaptive_avg_pool2d(input, options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor adaptive_avg_pool3d( + const Tensor& input, + ExpandingArrayWithOptionalElem<3> output_size) { + auto output_size_ = + torch::nn::modules::utils::_list_with_default(output_size, input.sizes()); + return torch::adaptive_avg_pool3d(input, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for +/// `torch::nn::functional::AdaptiveAvgPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3)); +/// ``` +inline Tensor adaptive_avg_pool3d( + const Tensor& input, + const AdaptiveAvgPool3dFuncOptions& options) { + return detail::adaptive_avg_pool3d(input, options.output_size()); +} + +// ============================================================================ + +inline std::vector _unpool_output_size( + const Tensor& input, + const IntArrayRef& kernel_size, + const IntArrayRef& stride, + const IntArrayRef& padding, + const c10::optional>& output_size) { + auto input_size = input.sizes(); + std::vector default_size; + for (const auto d : c10::irange(kernel_size.size())) { + default_size.push_back( + (input_size[input_size.size() - kernel_size.size() + d] - 1) * + stride[d] + + kernel_size[d] - 2 * padding[d]); + } + if (!output_size) { + return default_size; + } else { + std::vector output_size_; + if (output_size->size() == kernel_size.size() + 2) { + output_size_ = IntArrayRef(*output_size).slice(2).vec(); + } + if (output_size_.size() != kernel_size.size()) { + TORCH_CHECK( + false, + "output_size should be a sequence containing ", + kernel_size.size(), + " or ", + kernel_size.size() + 2, + " elements, but it has a length of '", + output_size_.size(), + "'"); + } + for (const auto d : c10::irange(kernel_size.size())) { + const auto min_size = default_size[d] - stride[d]; + const auto max_size = default_size[d] + stride[d]; + if (!(min_size <= output_size_[d] && output_size_[d] <= max_size)) { + TORCH_CHECK( + false, + "invalid output_size ", + output_size_, + " (dim ", + d, + " must be between ", + min_size, + " and ", + max_size, + ")"); + } + } + return output_size_; + } +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_unpool1d( + const Tensor& input, + const Tensor& indices, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + ExpandingArray<1> padding, + const 
c10::optional>& output_size) { + auto output_size_ = + _unpool_output_size(input, kernel_size, stride, padding, output_size); + output_size_.push_back(1); + return torch::max_unpool2d( + input.unsqueeze(-1), indices.unsqueeze(-1), output_size_) + .squeeze(-1); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxUnpool1dFuncOptions` +/// class to learn what optional arguments are supported for this functional. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool1d(x, indices, +/// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1)); +/// ``` +inline Tensor max_unpool1d( + const Tensor& input, + const Tensor& indices, + const MaxUnpool1dFuncOptions& options) { + return detail::max_unpool1d( + input, + indices, + options.kernel_size(), + options.stride(), + options.padding(), + options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_unpool2d( + const Tensor& input, + const Tensor& indices, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + ExpandingArray<2> padding, + const c10::optional>& output_size) { + auto output_size_ = + _unpool_output_size(input, kernel_size, stride, padding, output_size); + + return torch::max_unpool2d(input, indices, output_size_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxUnpool2dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool2d(x, indices, +/// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1)); +/// ``` +inline Tensor max_unpool2d( + const Tensor& input, + const Tensor& indices, + const MaxUnpool2dFuncOptions& options) { + return detail::max_unpool2d( + input, + indices, + options.kernel_size(), + options.stride(), + options.padding(), + options.output_size()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor max_unpool3d( + const Tensor& input, + const Tensor& indices, + ExpandingArray<3> kernel_size, + ExpandingArray<3> stride, + ExpandingArray<3> padding, + const c10::optional>& output_size) { + auto output_size_ = + _unpool_output_size(input, kernel_size, stride, padding, output_size); + + return torch::max_unpool3d(input, indices, output_size_, stride, padding); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool3d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::MaxUnpool3dFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3)); +/// ``` +inline Tensor max_unpool3d( + const Tensor& input, + const Tensor& indices, + const MaxUnpool3dFuncOptions& options) { + return detail::max_unpool3d( + input, + indices, + options.kernel_size(), + options.stride(), + options.padding(), + options.output_size()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple fractional_max_pool2d_with_indices( + const Tensor& input, + const ExpandingArray<2>& kernel_size, + const c10::optional>& output_size, + const c10::optional>& output_ratio, + const Tensor& _random_samples) { + if (output_size == c10::nullopt && output_ratio == c10::nullopt) { + TORCH_CHECK( + false, + "fractional_max_pool2d requires specifying either ", + "an output_size or an output_ratio"); + } + c10::optional> output_size_ = output_size; + if (output_size_ == c10::nullopt) { + TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt); + output_size_ = { + (int64_t)(static_cast(input.size(-2)) * (*output_ratio.value())[0]), + (int64_t)(static_cast(input.size(-1)) * (*output_ratio.value())[1])}; + } + + Tensor _random_samples_ = _random_samples; + if (!_random_samples_.defined()) { + auto n_batch = input.dim() == 3 ? 1 : input.size(0); + _random_samples_ = torch::rand( + {n_batch, input.size(-3), 2}, + torch::TensorOptions().dtype(input.dtype()).device(input.device())); + } + return torch::fractional_max_pool2d( + input, kernel_size, *output_size_, _random_samples_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool2d_with_indices(x, +/// F::FractionalMaxPool2dFuncOptions(3).output_size(2)); +/// ``` +inline std::tuple fractional_max_pool2d_with_indices( + const Tensor& input, + const FractionalMaxPool2dFuncOptions& options) { + return detail::fractional_max_pool2d_with_indices( + input, + options.kernel_size(), + options.output_size(), + options.output_ratio(), + options._random_samples()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor fractional_max_pool2d( + const Tensor& input, + ExpandingArray<2> kernel_size, + c10::optional> output_size, + c10::optional> output_ratio, + const Tensor& _random_samples) { + return std::get<0>(fractional_max_pool2d_with_indices( + input, kernel_size, output_size, output_ratio, _random_samples)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool2d(x, +/// F::FractionalMaxPool2dFuncOptions(3).output_size(2)); +/// ``` +inline Tensor fractional_max_pool2d( + const Tensor& input, + const FractionalMaxPool2dFuncOptions& options) { + return detail::fractional_max_pool2d( + input, + options.kernel_size(), + options.output_size(), + options.output_ratio(), + options._random_samples()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline std::tuple fractional_max_pool3d_with_indices( + const Tensor& input, + const ExpandingArray<3>& kernel_size, + const c10::optional>& output_size, + const c10::optional>& output_ratio, + const Tensor& _random_samples) { + if (output_size == c10::nullopt && output_ratio == c10::nullopt) { + TORCH_CHECK( + false, + "fractional_max_pool3d requires specifying either ", + "an output_size or an output_ratio"); + } + + c10::optional> output_size_ = output_size; + if (output_size_ == c10::nullopt) { + TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt); + output_size_ = { + (int64_t)(static_cast(input.size(-3)) * (*output_ratio.value())[0]), + (int64_t)(static_cast(input.size(-2)) * (*output_ratio.value())[1]), + (int64_t)(static_cast(input.size(-1)) * (*output_ratio.value())[2])}; + } + + Tensor _random_samples_ = _random_samples; + if (!_random_samples_.defined()) { + auto n_batch = input.dim() == 4 ? 1 : input.size(0); + _random_samples_ = torch::rand( + {n_batch, input.size(-4), 3}, + torch::TensorOptions().dtype(input.dtype()).device(input.device())); + } + return torch::fractional_max_pool3d( + input, kernel_size, *output_size_, _random_samples_); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool3d_with_indices(x, +/// F::FractionalMaxPool3dFuncOptions(3).output_size(2)); +/// ``` +inline std::tuple fractional_max_pool3d_with_indices( + const Tensor& input, + const FractionalMaxPool3dFuncOptions& options) { + return detail::fractional_max_pool3d_with_indices( + input, + options.kernel_size(), + options.output_size(), + options.output_ratio(), + options._random_samples()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor fractional_max_pool3d( + const Tensor& input, + ExpandingArray<3> kernel_size, + c10::optional> output_size, + c10::optional> output_ratio, + const Tensor& _random_samples) { + return std::get<0>(fractional_max_pool3d_with_indices( + input, kernel_size, output_size, output_ratio, _random_samples)); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See the documentation for +/// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what +/// optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool3d(x, +/// F::FractionalMaxPool3dFuncOptions(3).output_size(2)); +/// ``` +inline Tensor fractional_max_pool3d( + const Tensor& input, + const FractionalMaxPool3dFuncOptions& options) { + return detail::fractional_max_pool3d( + input, + options.kernel_size(), + options.output_size(), + options.output_ratio(), + options._random_samples()); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor lp_pool1d( + const Tensor& input, + double norm_type, + ExpandingArray<1> kernel_size, + ExpandingArray<1> stride, + bool ceil_mode) { + Tensor out = detail::avg_pool1d( + input.pow(norm_type), + kernel_size, + stride, + /*padding=*/0, + ceil_mode, + /*count_include_pad=*/true); + + return (torch::sign(out) * relu(torch::abs(out))) + .mul((*kernel_size)[0]) + .pow(1. / norm_type); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool1d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LPPool1dFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2)); +/// ``` +inline Tensor lp_pool1d( + const Tensor& input, + const LPPool1dFuncOptions& options) { + return detail::lp_pool1d( + input, + options.norm_type(), + options.kernel_size(), + options.stride(), + options.ceil_mode()); +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor lp_pool2d( + const Tensor& input, + double norm_type, + ExpandingArray<2> kernel_size, + ExpandingArray<2> stride, + bool ceil_mode) { + int kw = (*kernel_size)[0]; + int kh = (*kernel_size)[1]; + Tensor out = detail::avg_pool2d( + input.pow(norm_type), + kernel_size, + stride, + /*padding=*/0, + ceil_mode, + /*count_include_pad=*/true, + /*divisor_override=*/c10::nullopt); + + return (torch::sign(out) * relu(torch::abs(out))) + .mul(kw * kh) + .pow(1. / norm_type); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool2d +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::LPPool2dFuncOptions` class +/// to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2)); +/// ``` +inline Tensor lp_pool2d( + const Tensor& input, + const LPPool2dFuncOptions& options) { + return detail::lp_pool2d( + input, + options.norm_type(), + options.kernel_size(), + options.stride(), + options.ceil_mode()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h new file mode 100644 index 0000000000000000000000000000000000000000..4aa1d3fe0a477c5865b7d40bbd21f3b5ff7bcfec --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional/vision.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +inline Tensor affine_grid( + const Tensor& theta, + const IntArrayRef& size, + bool align_corners = false) { + // enforce floating point dtype on theta + TORCH_CHECK( + theta.is_floating_point(), + "Expected theta to have floating point type, but got ", + theta.dtype()); + + // check that shapes and sizes match + if (size.size() == 4) { + TORCH_CHECK( + theta.dim() == 3 && theta.size(-2) == 2 && theta.size(-1) == 3, + "Expected a batch of 2D affine matrices of shape Nx2x3 for size ", + size, + ". Got ", + theta.sizes(), + "."); + } else if (size.size() == 5) { + TORCH_CHECK( + theta.dim() == 3 && theta.size(-2) == 3 && theta.size(-1) == 4, + "Expected a batch of 3D affine matrices of shape Nx3x4 for size ", + size, + ". Got ", + theta.sizes(), + "."); + } else { + TORCH_CHECK( + false, + "affine_grid only supports 4D and 5D sizes, ", + "for 2D and 3D affine transforms, respectively. 
", + "Got size ", + size); + } + + if (*std::min_element(size.begin(), size.end()) <= 0) { + TORCH_CHECK(false, "Expected non-zero, positive output size. Got ", size); + } + + return torch::affine_grid_generator(theta, size, align_corners); +} + +// ============================================================================ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { +inline Tensor grid_sample( + const Tensor& input, + const Tensor& grid, + GridSampleFuncOptions::mode_t mode, + GridSampleFuncOptions::padding_mode_t padding_mode, + c10::optional align_corners) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int64_t mode_enum, padding_mode_enum; + + if (std::holds_alternative(mode)) { + mode_enum = 0; + } else if (std::holds_alternative(mode)) { + mode_enum = 1; + } else { /// mode == 'bicubic' + mode_enum = 2; + } + + if (std::holds_alternative(padding_mode)) { + padding_mode_enum = 0; + } else if (std::holds_alternative(padding_mode)) { + padding_mode_enum = 1; + } else { /// padding_mode == 'reflection' + padding_mode_enum = 2; + } + + if (!align_corners.has_value()) { + TORCH_WARN( + "Default grid_sample and affine_grid behavior has changed ", + "to align_corners=False since 1.3.0. Please specify ", + "align_corners=True if the old behavior is desired. ", + "See the documentation of grid_sample for details."); + align_corners = false; + } + + return torch::grid_sampler( + input, grid, mode_enum, padding_mode_enum, align_corners.value()); +} +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// See +/// https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.grid_sample +/// about the exact behavior of this functional. +/// +/// See the documentation for `torch::nn::functional::GridSampleFuncOptions` +/// class to learn what optional arguments are supported for this functional. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::grid_sample(input, grid, +/// F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true)); +/// ``` +inline Tensor grid_sample( + const Tensor& input, + const Tensor& grid, + const GridSampleFuncOptions& options = {}) { + return detail::grid_sample( + input, + grid, + options.mode(), + options.padding_mode(), + options.align_corners()); +} + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h new file mode 100644 index 0000000000000000000000000000000000000000..d08d785f1dade72ccc4bc28a48c1d8014c257c6c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/init.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { +namespace init { + +using NonlinearityType = std::variant< + enumtype::kLinear, + enumtype::kConv1D, + enumtype::kConv2D, + enumtype::kConv3D, + enumtype::kConvTranspose1D, + enumtype::kConvTranspose2D, + enumtype::kConvTranspose3D, + enumtype::kSigmoid, + enumtype::kTanh, + enumtype::kReLU, + enumtype::kLeakyReLU>; + +using FanModeType = std::variant; + +} // namespace init +} // namespace nn + +namespace nn { +namespace init { + +/// Return the recommended gain value for the given nonlinearity function. +TORCH_API double calculate_gain( + NonlinearityType nonlinearity, + double param = 0.01); + +/// Fills the given `tensor` with the provided `value` in-place, and returns it. +/// No gradient will be recorded for this operation. +TORCH_API Tensor constant_(Tensor tensor, Scalar value); + +/// Fills the given `tensor` with the Dirac delta function in-place, and returns +/// it. No gradient will be recorded for this operation. 
+TORCH_API Tensor dirac_(Tensor tensor); + +/// Fills the given 2-dimensional `matrix` with an identity matrix. +/// No gradient will be recorded for this operation. +TORCH_API Tensor eye_(Tensor matrix); + +/// Fills the given 2-dimensional `matrix` with values drawn from a normal +/// distribution parameterized by `mean` and `std`. +/// No gradient will be recorded for this operation. +TORCH_API Tensor normal_(Tensor tensor, double mean = 0, double std = 1); + +/// Fills the given `tensor` with ones. +/// No gradient will be recorded for this operation. +TORCH_API Tensor ones_(Tensor tensor); + +/// Fills the input `Tensor` with a (semi) orthogonal matrix, as described in +/// "Exact solutions to the nonlinear dynamics of learning in deep linear neural +/// networks" - Saxe, A. et al. (2013). The input tensor must have at least 2 +/// dimensions, and for tensors with more than 2 dimensions the trailing +/// dimensions are flattened. +/// No gradient will be recorded for this operation. +TORCH_API Tensor orthogonal_(Tensor tensor, double gain = 1.0); + +/// Fills the 2D input `Tensor` as a sparse matrix, where the +/// non-zero elements will be drawn from a centered normal distribution +/// with the given standard deviation `std`, as described in "Deep learning via +/// Hessian-free optimization" - Martens, J. (2010). The `sparsity` is a real +/// value between 0 and 1 that controls the fraction of elements in each column +/// to be set to zero. +/// No gradient will be recorded for this operation. +TORCH_API Tensor sparse_(Tensor tensor, double sparsity, double std = 0.01); + +/// Fills the given 2-dimensional `matrix` with values drawn from a uniform +/// distribution parameterized by `low` and `high`. +/// No gradient will be recorded for this operation. 
+TORCH_API Tensor uniform_(Tensor tensor, double low = 0, double high = 1); + +/// Fills the input `Tensor` with values according to the method +/// described in "Delving deep into rectifiers: Surpassing human-level +/// performance on ImageNet classification" - He, K. et al. (2015), using a +/// normal distribution. Also known as He initialization. +/// No gradient will be recorded for this operation. +TORCH_API Tensor kaiming_normal_( + Tensor tensor, + double a = 0, + FanModeType mode = torch::kFanIn, + NonlinearityType nonlinearity = torch::kLeakyReLU); + +/// Fills the input `Tensor` with values according to the method +/// described in "Delving deep into rectifiers: Surpassing human-level +/// performance on ImageNet classification" - He, K. et al. (2015), using a +/// uniform distribution. Also known as He initialization. +/// No gradient will be recorded for this operation. +TORCH_API Tensor kaiming_uniform_( + Tensor tensor, + double a = 0, + FanModeType mode = torch::kFanIn, + NonlinearityType nonlinearity = torch::kLeakyReLU); + +/// Fills the input `Tensor` with values according to the method +/// described in "Understanding the difficulty of training deep feedforward +/// neural networks" - Glorot, X. & Bengio, Y. (2010). Values are scaled by the +/// `gain` parameter. No gradient will be recorded for this operation. +TORCH_API Tensor xavier_normal_(Tensor tensor, double gain = 1.0); + +/// Fills the input `Tensor` with values according to the method +/// described in "Understanding the difficulty of training deep feedforward +/// neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform +/// distribution. Values are scaled by the `gain` parameter +/// No gradient will be recorded for this operation. +TORCH_API Tensor xavier_uniform_(Tensor tensor, double gain = 1.0); + +/// Fills the given `tensor` with zeros. +/// No gradient will be recorded for this operation. 
+TORCH_API Tensor zeros_(Tensor tensor); + +TORCH_API std::tuple _calculate_fan_in_and_fan_out( + const Tensor& tensor); + +} // namespace init +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h new file mode 100644 index 0000000000000000000000000000000000000000..de8d243533a787b1ed10ea5c90ef3286756177cd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/module.h @@ -0,0 +1,702 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// The base class for all modules in PyTorch. +/// +/// \rst +/// .. note:: +/// The design and implementation of this class is largely based on the Python +/// API. You may want to consult the python documentation for +/// :py:class:`pytorch:torch.nn.Module` for further clarification on certain +/// methods or behavior. +/// \endrst +/// +/// A `Module` is an abstraction over the implementation of some function or +/// algorithm, possibly associated with some persistent data. A `Module` may +/// contain further `Module`s ("submodules"), each with their own +/// implementation, persistent data and further submodules. `Module`s can thus +/// be said to form a recursive tree structure. A `Module` is registered as a +/// submodule to another `Module` by calling `register_module()`, typically from +/// within a parent module's constructor. +/// +/// A distinction is made between three kinds of persistent data that may be +/// associated with a `Module`: +/// +/// 1. *Parameters*: tensors that record gradients, typically weights updated +/// during the backward step (e.g. the `weight` of a `Linear` module), +/// 2. 
*Buffers*: tensors that do not record gradients, typically updated during +/// the forward step, such as running statistics (e.g. `mean` and `variance` +/// in the `BatchNorm` module), +/// 3. Any additional state, not necessarily tensors, required for the +/// implementation or configuration of a `Module`. +/// +/// The first two kinds of state are special in that they may be registered +/// with the `Module` system to allow convenient access and batch configuration. +/// For example, registered parameters in any `Module` may be iterated over via +/// the `parameters()` accessor. Further, changing the data type of a `Module`'s +/// registered parameters can be done conveniently via `Module::to()`, e.g. +/// `module->to(torch::kCUDA)` to move all parameters to GPU memory. Lastly, +/// registered parameters and buffers are handled specially during a `clone()` +/// operation, which performs a deepcopy of a cloneable `Module` hierarchy. +/// +/// Parameters are registered with a `Module` via `register_parameter`. Buffers +/// are registered separately via `register_buffer`. These methods are part of +/// the public API of `Module` and are typically invoked from within a +/// concrete `Module`s constructor. +class TORCH_API Module : public std::enable_shared_from_this { + public: + using ModuleApplyFunction = std::function; + using ConstModuleApplyFunction = std::function; + using NamedModuleApplyFunction = + std::function; + using ConstNamedModuleApplyFunction = + std::function; + using ModulePointerApplyFunction = + std::function&)>; + using NamedModulePointerApplyFunction = + std::function&)>; + + /// Tells the base `Module` about the name of the submodule. + explicit Module(std::string name); + + /// Constructs the module without immediate knowledge of the submodule's name. + /// The name of the submodule is inferred via RTTI (if possible) the first + /// time `.name()` is invoked. 
+ Module(); + Module(const Module&) = default; + Module& operator=(const Module&) = default; + Module(Module&&) noexcept = default; + Module& operator=(Module&&) noexcept = default; + + virtual ~Module() = default; + + /// Returns the name of the `Module`. + /// + /// A `Module` has an associated `name`, which is a string representation of + /// the kind of concrete `Module` it represents, such as `"Linear"` for the + /// `Linear` module. Under most circumstances, this name is automatically + /// inferred via runtime type information (RTTI). In the unusual circumstance + /// that you have this feature disabled, you may want to manually name your + /// `Module`s by passing the string name to the `Module` base class' + /// constructor. + const std::string& name() const noexcept; + + /// Performs a recursive deep copy of the module and all its registered + /// parameters, buffers and submodules. + /// + /// Optionally, this method sets the current device + /// to the one supplied before cloning. If no device is given, each + /// parameter and buffer will be moved to the device of its source. + /// + /// \rst + /// .. attention:: + /// Attempting to call the `clone()` method inherited from the base `Module` + /// class (the one documented here) will fail. To inherit an actual + /// implementation of `clone()`, you must subclass `Cloneable`. `Cloneable` + /// is templatized on the concrete module type, and can thus properly copy a + /// `Module`. This method is provided on the base class' API solely for an + /// easier-to-use polymorphic interface. + /// \endrst + virtual std::shared_ptr clone( + const optional& device = nullopt) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `Module&`. + /// + /// \rst + /// .. 
code-block:: cpp + /// MyModule module; + /// module->apply([](nn::Module& module) { + /// std::cout << module.name() << std::endl; + /// }); + /// \endrst + void apply(const ModuleApplyFunction& function); + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const Module&`. + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const nn::Module& module) { + /// std::cout << module.name() << std::endl; + /// }); + /// \endrst + void apply(const ConstModuleApplyFunction& function) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::string&` for the key of the module, + /// and a `Module&`. The key of the module itself is the empty string. If + /// `name_prefix` is given, it is prepended to every key as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const std::string& key, nn::Module& module) { + /// std::cout << key << ": " << module.name() << std::endl; + /// }); + /// \endrst + void apply( + const NamedModuleApplyFunction& function, + const std::string& name_prefix = std::string()); + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::string&` for the key of the module, + /// and a `const Module&`. The key of the module itself is the empty string. + /// If `name_prefix` is given, it is prepended to every key as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. 
code-block:: cpp + /// MyModule module; + /// module->apply([](const std::string& key, const nn::Module& module) { + /// std::cout << key << ": " << module.name() << std::endl; + /// }); + /// \endrst + void apply( + const ConstNamedModuleApplyFunction& function, + const std::string& name_prefix = std::string()) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::shared_ptr&`. + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const std::shared_ptr& module) { + /// std::cout << module->name() << std::endl; + /// }); + /// \endrst + void apply(const ModulePointerApplyFunction& function) const; + + /// Applies the `function` to the `Module` and recursively to every submodule. + /// The function must accept a `const std::string&` for the key of the module, + /// and a `const std::shared_ptr&`. The key of the module itself is + /// the empty string. If `name_prefix` is given, it is prepended to every key + /// as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. code-block:: cpp + /// MyModule module; + /// module->apply([](const std::string& key, + /// const std::shared_ptr& module) { + /// std::cout << key << ": " << module->name() << std::endl; + /// }); + /// \endrst + void apply( + const NamedModulePointerApplyFunction& function, + const std::string& name_prefix = std::string()) const; + + /// Returns the parameters of this `Module` and if `recurse` is true, also + /// recursively of every submodule. + std::vector parameters(bool recurse = true) const; + + /// Returns an `OrderedDict` with the parameters of this `Module` along with + /// their keys, and if `recurse` is true also recursively of every submodule. + OrderedDict named_parameters(bool recurse = true) const; + + /// Returns the buffers of this `Module` and if `recurse` is true, also + /// recursively of every submodule. 
+ std::vector buffers(bool recurse = true) const; + + /// Returns an `OrderedDict` with the buffers of this `Module` along with + /// their keys, and if `recurse` is true also recursively of every submodule. + OrderedDict named_buffers(bool recurse = true) const; + + /// Returns the submodules of this `Module` (the entire submodule hierarchy) + /// and if `include_self` is true, also inserts a `shared_ptr` to this module + /// in the first position. + /// + /// \rst + /// .. warning:: + /// Only pass `include_self` as `true` if this `Module` is stored in a + /// `shared_ptr`! Otherwise an exception will be thrown. You may still call + /// this method with `include_self` set to false if your `Module` is not + /// stored in a `shared_ptr`. + /// \endrst + std::vector> modules(bool include_self = true) const; + + /// Returns an `OrderedDict` of the submodules of this `Module` (the entire + /// submodule hierarchy) and their keys, and if `include_self` is true, also + /// inserts a `shared_ptr` to this module in the first position. If + /// `name_prefix` is given, it is prepended to every key as + /// `.` (and just `name_prefix` for the module itself). + /// + /// \rst + /// .. warning:: + /// Only pass `include_self` as `true` if this `Module` is stored in a + /// `shared_ptr`! Otherwise an exception will be thrown. You may still call + /// this method with `include_self` set to false if your `Module` is not + /// stored in a `shared_ptr`. + /// \endrst + OrderedDict> named_modules( + const std::string& name_prefix = std::string(), + bool include_self = true) const; + + /// Returns the direct submodules of this `Module`. + std::vector> children() const; + + /// Returns an `OrderedDict` of the direct submodules of this `Module` and + /// their keys. + OrderedDict> named_children() const; + + /// Enables "training" mode. + virtual void train(bool on = true); + + /// Calls train(false) to enable "eval" mode. + /// Do not override this method, override `train()` instead. 
+ void eval(); + + /// True if the module is in training mode. + /// + /// Every `Module` has a boolean associated with it that determines whether + /// the `Module` is currently in *training* mode (set via `.train()`) or in + /// *evaluation* (inference) mode (set via `.eval()`). This property is + /// exposed via `is_training()`, and may be used by the implementation of a + /// concrete module to modify its runtime behavior. See the `BatchNorm` or + /// `Dropout` modules for examples of `Module`s that use different code paths + /// depending on this property. + virtual bool is_training() const noexcept; + + /// Recursively casts all parameters to the given `dtype` and `device`. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + virtual void to( + torch::Device device, + torch::Dtype dtype, + bool non_blocking = false); + + /// Recursively casts all parameters to the given dtype. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + virtual void to(torch::Dtype dtype, bool non_blocking = false); + + /// Recursively moves all parameters to the given device. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + virtual void to(torch::Device device, bool non_blocking = false); + + /// Recursively zeros out the `grad` value of each registered parameter. + virtual void zero_grad(bool set_to_none = true); + + /// Attempts to cast this `Module` to the given `ModuleType`. 
+ /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. code-block:: cpp + /// + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module->apply(initialize_weights); + /// \endrst + template + typename ModuleType::ContainedType* as() noexcept; + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. code-block:: cpp + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module->apply(initialize_weights); + /// \endrst + template + const typename ModuleType::ContainedType* as() const noexcept; + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. code-block:: cpp + /// + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module.apply(initialize_weights); + /// \endrst + template < + typename ModuleType, + typename = torch::detail::disable_if_module_holder_t> + ModuleType* as() noexcept; + + /// Attempts to cast this `Module` to the given `ModuleType`. + /// + /// This method is useful when calling `apply()`. + /// \rst + /// .. 
code-block:: cpp + /// + /// void initialize_weights(nn::Module& module) { + /// torch::NoGradGuard no_grad; + /// if (auto* linear = module.as()) { + /// linear->weight.normal_(0.0, 0.02); + /// } + /// } + /// + /// MyModule module; + /// module.apply(initialize_weights); + /// \endrst + template < + typename ModuleType, + typename = torch::detail::disable_if_module_holder_t> + const ModuleType* as() const noexcept; + + /// Serializes the `Module` into the given `OutputArchive`. + /// + /// If the `Module` contains unserializable submodules (e.g. + /// `nn::Functional`), those submodules are skipped when serializing. + virtual void save(serialize::OutputArchive& archive) const; + + /// Deserializes the `Module` from the given `InputArchive`. + /// + /// If the `Module` contains unserializable submodules (e.g. + /// `nn::Functional`), we don't check the existence of those submodules in the + /// `InputArchive` when deserializing. + virtual void load(serialize::InputArchive& archive); + + /// Streams a pretty representation of the `Module` into the given `stream`. + /// By default, this representation will be the name of the module (taken from + /// `name()`), followed by a recursive pretty print of all of the `Module`'s + /// submodules. + /// + /// Override this method to change the pretty print. The input + /// `stream` should be returned from the method, to allow easy chaining. + virtual void pretty_print(std::ostream& stream) const; + + /// Returns whether the `Module` is serializable. + virtual bool is_serializable() const; + + /// Registers a parameter with this `Module`. + /// + /// A parameter should be any gradient-recording tensor used in the + /// implementation of your `Module`. Registering it makes it available to + /// methods such as `parameters()`, `clone()` or `to().` + /// + /// Note that registering an undefined Tensor (e.g. 
+ /// `module.register_parameter("param", Tensor())`) is allowed, and is + /// equivalent to `module.register_parameter("param", None)` in Python API. + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// weight_ = register_parameter("weight", torch::randn({A, B})); + /// } + /// \endrst + Tensor& register_parameter( + std::string name, + Tensor tensor, + bool requires_grad = true); + + /// Registers a buffer with this `Module`. + /// + /// A buffer is intended to be state in your module that does not record + /// gradients, such as running statistics. Registering it makes it available + /// to methods such as `buffers()`, `clone()` or `to(). + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// mean_ = register_buffer("mean", torch::empty({num_features_})); + /// } + /// \endrst + Tensor& register_buffer(std::string name, Tensor tensor); + + /// Registers a submodule with this `Module`. + /// + /// Registering a module makes it available to methods such as `modules()`, + /// `clone()` or `to()`. + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// submodule_ = register_module("linear", torch::nn::Linear(3, 4)); + /// } + /// \endrst + template + std::shared_ptr register_module( + std::string name, + std::shared_ptr module); + + /// Registers a submodule with this `Module`. + /// + /// This method deals with `ModuleHolder`s. + /// + /// Registering a module makes it available to methods such as `modules()`, + /// `clone()` or `to()`. + /// + /// \rst + /// .. code-block:: cpp + /// + /// MyModule::MyModule() { + /// submodule_ = register_module("linear", torch::nn::Linear(3, 4)); + /// } + /// \endrst + template + std::shared_ptr register_module( + std::string name, + ModuleHolder module_holder); + + /// Replaces a registered submodule with this `Module`. 
+ /// + /// This takes care of the registration, if you used submodule members, you + /// should + // assign the submodule as well, i.e. use as + /// module->submodule_ = module->replace_module("linear", + /// torch::nn::Linear(3, 4)); + /// It only works when a module of the name is already registered. + /// + /// This is useful for replacing a module after initialization, e.g. + /// for finetuning. + template + std::shared_ptr replace_module( + const std::string& name, + std::shared_ptr module); + + /// Replaces a registered submodule with this `Module`. + /// This method deals with `ModuleHolder`s. + /// + /// This takes care of the registration, if you used submodule members, you + /// should + // assign the submodule as well, i.e. use as + /// module->submodule_ = module->replace_module("linear", linear_holder); + /// It only works when a module of the name is already registered. + /// + /// This is useful for replacing a module after initialization, e.g. + /// for finetuning. + template + std::shared_ptr replace_module( + const std::string& name, + ModuleHolder module_holder); + + /// Unregisters a submodule from this `Module`. If there is no such module + /// with `name` an exception is thrown. + void unregister_module(const std::string& name); + + protected: + /// The following three functions allow a module with default arguments in its + /// forward method to be used in a Sequential module. + /// You should NEVER override these functions manually. Instead, you should + /// use the `FORWARD_HAS_DEFAULT_ARGS` macro. + virtual bool _forward_has_default_args() { + return false; + } + + virtual unsigned int _forward_num_required_args() { + TORCH_CHECK( + false, + "torch::nn::Module subclass that has default arguments in `forward` method ", + "must override `_forward_num_required_args` method. 
Please use ", + "`FORWARD_HAS_DEFAULT_ARGS` macro to do so."); + } + + virtual std::vector _forward_populate_default_args( + std::vector&& arguments) { + TORCH_CHECK( + false, + "torch::nn::Module subclass that has default arguments in `forward` method ", + "must override `_forward_populate_default_args` method. Please use ", + "`FORWARD_HAS_DEFAULT_ARGS` macro to do so."); + } + + /// The registered parameters of this `Module`. + /// Inorder to access parameters_ in ParameterDict and ParameterList + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + OrderedDict parameters_; + + private: + // Friend classes. + + template + friend class Cloneable; + + template + friend struct AnyModuleHolder; + + /// Pretty prints the given `Module` into the `ostream`. + TORCH_API friend std::ostream& operator<<( + std::ostream& stream, + const nn::Module& module); + + // data parallel using this method to configure gradient edges during the + // replicate step. + template + friend void replicate_grad_edges( + const std::shared_ptr& module, + const std::vector>& replicas, + const std::vector& devices); + + // Private methods. + + /// Used in the implementation of `Cloneable`. + virtual void clone_(Module& other, const optional& device); + + /// The implementation of the various `to()` methods. + template + void to_impl(Ts&&... ts); + + /// Implements pretty printing the module hierarchy. + void pretty_print_recursive( + std::ostream& stream, + const std::string& indentation) const; + + /// Applies the `function` to every submodule recursively, starting at this + /// `Module`'s children (thus not including the module itself). + void apply_to_submodules( + const NamedModulePointerApplyFunction& function, + const std::string& name_prefix = std::string()) const; + + /// Returns a shared_ptr to `this` in a safe (checked) way. + std::shared_ptr shared_from_this_checked() const; + + /// The registered buffers of this `Module`. 
+ OrderedDict buffers_; + + /// The registered (direct) submodules of this `Module`. + OrderedDict> children_; + + /// The module's name (e.g. "LSTM"). + mutable optional name_; + + /// Whether the module is in training mode. + bool is_training_{true}; +}; + +/// Serialize a `Module` pointer into an `OutputArchive`. +TORCH_API serialize::OutputArchive& operator<<( + serialize::OutputArchive& archive, + const std::shared_ptr& module); + +/// Deserializes a `Module` from an `InputArchive`. +TORCH_API serialize::InputArchive& operator>>( + serialize::InputArchive& archive, + const std::shared_ptr& module); + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nn::Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template +typename ModuleType::ContainedType* Module::as() noexcept { + // Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for + // `Linear`, since `LinearImpl` inherits `nn::Module`. + return as(); +} + +template +const typename ModuleType::ContainedType* Module::as() const noexcept { + // Use the contained type of the `ModuleHolder`, e.g. `LinearImpl` for + // `Linear`, since `LinearImpl` inherits `nn::Module`. 
+ return as(); +} + +template +ModuleType* Module::as() noexcept { + return dynamic_cast(this); +} + +template +const ModuleType* Module::as() const noexcept { + return dynamic_cast(this); +} + +template +std::shared_ptr Module::register_module( + std::string name, + std::shared_ptr module) { + TORCH_CHECK(!name.empty(), "Submodule name must not be empty"); + TORCH_CHECK( + name.find('.') == std::string::npos, + "Submodule name must not contain a dot (got '", + name, + "')"); + auto& base_module = children_.insert(std::move(name), std::move(module)); + return std::dynamic_pointer_cast(base_module); +} + +template +std::shared_ptr Module::register_module( + std::string name, + ModuleHolder module_holder) { + return register_module(std::move(name), module_holder.ptr()); +} + +template +std::shared_ptr Module::replace_module( + const std::string& name, + std::shared_ptr module) { + auto& base_module = (children_[name] = std::move(module)); + return std::dynamic_pointer_cast(base_module); +} + +template +std::shared_ptr Module::replace_module( + const std::string& name, + ModuleHolder module_holder) { + return replace_module(name, module_holder.ptr()); +} + +template +void Module::to_impl(Ts&&... ts) { + // First call `to()` on every child module. + for (auto& child : children_) { + child.value()->to(ts...); + } + // Then move every parameter to the new dtype/device. + for (auto& parameter : named_parameters(/*recurse=*/false)) { + parameter->set_data(autograd::Variable(*parameter).to(ts...)); + } + // Then move every buffer to the new dtype/device. 
+ for (auto& buffer : named_buffers(/*recurse=*/false)) { + buffer->set_data(autograd::Variable(*buffer).to(ts...)); + } +} + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h new file mode 100644 index 0000000000000000000000000000000000000000..e037d52a8535490ff5ecb17e578df5b4101ee9a3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules.h @@ -0,0 +1,36 @@ +#pragma once + +// Common +#include + +// Containers +#include +#include +#include +#include +#include +#include +#include +#include + +// Layers +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h new file mode 100644 index 0000000000000000000000000000000000000000..4a5224a478e1b650e393dcb3f95adc13ab36d65f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h new file mode 100644 index 0000000000000000000000000000000000000000..e51805d3648521406b1ce3f72108ccf0ea7a94c0 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h @@ -0,0 +1,714 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `ELU` module. +/// +/// Example: +/// ``` +/// ELU model(ELUOptions().alpha(42.42).inplace(true)); +/// ``` +struct TORCH_API ELUOptions { + /// The `alpha` value for the ELU formulation. Default: 1.0 + TORCH_ARG(double, alpha) = 1.0; + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::elu`. +/// +/// See the documentation for `torch::nn::ELUOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true)); +/// ``` +using ELUFuncOptions = ELUOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `SELU` module. +/// +/// Example: +/// ``` +/// SELU model(SELUOptions().inplace(true)); +/// ``` +struct TORCH_API SELUOptions { + /* implicit */ SELUOptions(bool inplace = false); + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace); +}; + +namespace functional { +/// Options for `torch::nn::functional::selu`. +/// +/// See the documentation for `torch::nn::SELUOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::selu(input, F::SELUFuncOptions(false)); +/// ``` +using SELUFuncOptions = SELUOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `GLU` module. 
+/// +/// Example: +/// ``` +/// GLU model(GLUOptions(1)); +/// ``` +struct TORCH_API GLUOptions { + /* implicit */ GLUOptions(int64_t dim = -1); + + /// the dimension on which to split the input. Default: -1 + TORCH_ARG(int64_t, dim); +}; + +namespace functional { +/// Options for `torch::nn::functional::glu`. +/// +/// See the documentation for `torch::nn::GLUOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::glu(input, GLUFuncOptions(1)); +/// ``` +using GLUFuncOptions = GLUOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `GELU` module. +/// +/// Example: +/// ``` +/// GELU model(GELUOptions().approximate("none")); +/// ``` +struct TORCH_API GELUOptions { + /// Specifies the approximation to apply to the output. + TORCH_ARG(std::string, approximate) = "none"; +}; + +namespace functional { +/// Options for `torch::nn::functional::gelu`. +/// +/// See the documentation for `torch::nn::GELUOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::gelu(input, F::GELUFuncOptions().approximate("none")); +/// ``` +using GELUFuncOptions = GELUOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `Hardshrink` module. +/// +/// Example: +/// ``` +/// Hardshrink model(HardshrinkOptions().lambda(42.42)); +/// ``` +struct TORCH_API HardshrinkOptions { + /* implicit */ HardshrinkOptions(double lambda = 0.5); + + /// the `lambda` value for the Hardshrink formulation. Default: 0.5 + TORCH_ARG(double, lambda); +}; + +namespace functional { +/// Options for `torch::nn::functional::hardshrink`. +/// +/// See the documentation for `torch::nn::HardshrinkOptions` class to learn what +/// arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42)); +/// ``` +using HardshrinkFuncOptions = HardshrinkOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `Hardtanh` module. +/// +/// Example: +/// ``` +/// Hardtanh +/// model(HardtanhOptions().min_val(-42.42).max_val(0.42).inplace(true)); +/// ``` +struct TORCH_API HardtanhOptions { + /// minimum value of the linear region range. Default: -1 + TORCH_ARG(double, min_val) = -1.0; + + /// maximum value of the linear region range. Default: 1 + TORCH_ARG(double, max_val) = 1.0; + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::hardtanh`. +/// +/// See the documentation for `torch::nn::HardtanhOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hardtanh(x, +/// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true)); +/// ``` +using HardtanhFuncOptions = HardtanhOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `LeakyReLU` module. +/// +/// Example: +/// ``` +/// LeakyReLU model(LeakyReLUOptions().negative_slope(0.42).inplace(true)); +/// ``` +struct TORCH_API LeakyReLUOptions { + /// Controls the angle of the negative slope. Default: 1e-2 + TORCH_ARG(double, negative_slope) = 1e-2; + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::leaky_relu`. +/// +/// See the documentation for `torch::nn::LeakyReLUOptions` class to learn what +/// arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::leaky_relu(x, +/// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true)); +/// ``` +using LeakyReLUFuncOptions = LeakyReLUOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `Softmax` module. +/// +/// Example: +/// ``` +/// Softmax model(SoftmaxOptions(1)); +/// ``` +struct TORCH_API SoftmaxOptions { + SoftmaxOptions(int64_t dim); + + /// Dimension along which Softmax will be computed. + TORCH_ARG(int64_t, dim); +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::softmax`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softmax(input, F::SoftmaxFuncOptions(1)); +/// ``` +struct TORCH_API SoftmaxFuncOptions { + SoftmaxFuncOptions(int64_t dim); + + /// Dimension along which Softmax will be computed. + TORCH_ARG(int64_t, dim); + + /// the desired data type of returned tensor. + /// If specified, the input tensor is casted to `dtype` before the operation + /// is performed. This is useful for preventing data type overflows. Default: + /// None. + TORCH_ARG(c10::optional, dtype) = c10::nullopt; +}; + +} // namespace functional + +// ============================================================================ + +/// Options for the `Softmin` module. +/// +/// Example: +/// ``` +/// Softmin model(SoftminOptions(1)); +/// ``` +struct TORCH_API SoftminOptions { + SoftminOptions(int64_t dim); + + /// Dimension along which Softmin will be computed. + TORCH_ARG(int64_t, dim); +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::softmin`. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softmin(input, F::SoftminFuncOptions(1)); +/// ``` +struct TORCH_API SoftminFuncOptions { + SoftminFuncOptions(int64_t dim); + + /// Dimension along which Softmin will be computed. + TORCH_ARG(int64_t, dim); + + /// the desired data type of returned tensor. + /// If specified, the input tensor is casted to `dtype` before the operation + /// is performed. This is useful for preventing data type overflows. Default: + /// None. + TORCH_ARG(c10::optional, dtype) = c10::nullopt; +}; + +} // namespace functional + +// ============================================================================ + +/// Options for the `LogSoftmax` module. +/// +/// Example: +/// ``` +/// LogSoftmax model(LogSoftmaxOptions(1)); +/// ``` +struct TORCH_API LogSoftmaxOptions { + LogSoftmaxOptions(int64_t dim); + + /// Dimension along which LogSoftmax will be computed. + TORCH_ARG(int64_t, dim); +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::log_softmax`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::log_softmax(input, LogSoftmaxFuncOptions(1)); +/// ``` +struct TORCH_API LogSoftmaxFuncOptions { + LogSoftmaxFuncOptions(int64_t dim); + + /// Dimension along which LogSoftmax will be computed. + TORCH_ARG(int64_t, dim); + + /// the desired data type of returned tensor. + /// If specified, the input tensor is casted to `dtype` before the operation + /// is performed. This is useful for preventing data type overflows. Default: + /// None. + TORCH_ARG(c10::optional, dtype) = c10::nullopt; +}; + +} // namespace functional + +// ============================================================================ + +/// Options for the `PReLU` module. 
+/// +/// Example: +/// ``` +/// PReLU model(PReLUOptions().num_parameters(42)); +/// ``` +struct TORCH_API PReLUOptions { + /// number of `a` to learn. Although it takes an int as input, there is only + /// two values are legitimate: 1, or the number of channels at input. Default: + /// 1 + TORCH_ARG(int64_t, num_parameters) = 1; + + /// the initial value of `a`. Default: 0.25 + TORCH_ARG(double, init) = 0.25; +}; + +// ============================================================================ + +/// Options for the `ReLU` module. +/// +/// Example: +/// ``` +/// ReLU model(ReLUOptions().inplace(true)); +/// ``` +struct TORCH_API ReLUOptions { + /* implicit */ ReLUOptions(bool inplace = false); + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace); +}; + +namespace functional { +/// Options for `torch::nn::functional::relu`. +/// +/// See the documentation for `torch::nn::ReLUOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::relu(x, F::ReLUFuncOptions().inplace(true)); +/// ``` +using ReLUFuncOptions = ReLUOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `ReLU6` module. +/// +/// Example: +/// ``` +/// ReLU6 model(ReLU6Options().inplace(true)); +/// ``` +struct TORCH_API ReLU6Options { + /* implicit */ ReLU6Options(bool inplace = false); + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace); +}; + +namespace functional { +/// Options for `torch::nn::functional::relu6`. +/// +/// See the documentation for `torch::nn::ReLU6Options` class to learn what +/// arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::relu6(x, F::ReLU6FuncOptions().inplace(true)); +/// ``` +using ReLU6FuncOptions = ReLU6Options; +} // namespace functional + +// ============================================================================ + +/// Options for the `RReLU` module. +/// +/// Example: +/// ``` +/// RReLU model(RReLUOptions().lower(0.24).upper(0.42).inplace(true)); +/// ``` +struct TORCH_API RReLUOptions { + /// lower bound of the uniform distribution. Default: 1/8 + TORCH_ARG(double, lower) = 1.0 / 8.0; + + /// upper bound of the uniform distribution. Default: 1/3 + TORCH_ARG(double, upper) = 1.0 / 3.0; + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace) = false; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::rrelu`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true)); +/// ``` +struct TORCH_API RReLUFuncOptions { + /// lower bound of the uniform distribution. Default: 1/8 + TORCH_ARG(double, lower) = 1.0 / 8.0; + + /// upper bound of the uniform distribution. Default: 1/3 + TORCH_ARG(double, upper) = 1.0 / 3.0; + + TORCH_ARG(bool, training) = false; + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace) = false; +}; + +} // namespace functional + +// ============================================================================ + +/// Options for the `CELU` module. +/// +/// Example: +/// ``` +/// CELU model(CELUOptions().alpha(42.42).inplace(true)); +/// ``` +struct TORCH_API CELUOptions { + /// The `alpha` value for the CELU formulation. Default: 1.0 + TORCH_ARG(double, alpha) = 1.0; + + /// can optionally do the operation in-place. 
Default: False + TORCH_ARG(bool, inplace) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::celu`. +/// +/// See the documentation for `torch::nn::CELUOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true)); +/// ``` +using CELUFuncOptions = CELUOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `Softplus` module. +/// +/// Example: +/// ``` +/// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42)); +/// ``` +struct TORCH_API SoftplusOptions { + /// the `beta` value for the Softplus formulation. Default: 1 + TORCH_ARG(double, beta) = 1.0; + + /// values above this revert to a linear function. Default: 20 + TORCH_ARG(double, threshold) = 20.0; +}; + +namespace functional { +/// Options for `torch::nn::functional::softplus`. +/// +/// See the documentation for `torch::nn::SoftplusOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0)); +/// ``` +using SoftplusFuncOptions = SoftplusOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `Softshrink` module. +/// +/// Example: +/// ``` +/// Softshrink model(SoftshrinkOptions(42.42)); +/// ``` +struct TORCH_API SoftshrinkOptions { + /* implicit */ SoftshrinkOptions(double lambda = 0.5); + + /// the `lambda` value for the Softshrink formulation. Default: 0.5 + TORCH_ARG(double, lambda); +}; + +namespace functional { +/// Options for `torch::nn::functional::softshrink`. +/// +/// See the documentation for `torch::nn::SoftshrinkOptions` class to learn what +/// arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::softshrink(x, F::SoftshrinkFuncOptions(0.42)); +/// ``` +using SoftshrinkFuncOptions = SoftshrinkOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `Threshold` module. +/// +/// Example: +/// ``` +/// Threshold model(ThresholdOptions(42.42, 24.24).inplace(true)); +/// ``` +struct TORCH_API ThresholdOptions { + ThresholdOptions(double threshold, double value) + : threshold_(threshold), value_(value) {} + + /// The value to threshold at + TORCH_ARG(double, threshold); + + /// The value to replace with + TORCH_ARG(double, value); + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::threshold`. +/// +/// See the documentation for `torch::nn::ThresholdOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true)); +/// ``` +using ThresholdFuncOptions = ThresholdOptions; +} // namespace functional + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::gumbel_softmax`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1)); +/// ``` +struct TORCH_API GumbelSoftmaxFuncOptions { + /// non-negative scalar temperature + TORCH_ARG(double, tau) = 1.0; + + /// returned samples will be discretized as one-hot vectors, + /// but will be differentiated as if it is the soft sample in autograd. + /// Default: False + TORCH_ARG(bool, hard) = false; + + /// dimension along which softmax will be computed. 
Default: -1 + TORCH_ARG(int, dim) = -1; +}; + +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiheadAttention` module. +/// +/// Example: +/// ``` +/// MultiheadAttention model(MultiheadAttentionOptions(20, 10).bias(false)); +/// ``` +struct TORCH_API MultiheadAttentionOptions { + MultiheadAttentionOptions(int64_t embed_dim, int64_t num_heads); + + /// total dimension of the model. + TORCH_ARG(int64_t, embed_dim); + + /// parallel attention heads. + TORCH_ARG(int64_t, num_heads); + + /// a Dropout layer on attn_output_weights. Default: 0.0. + TORCH_ARG(double, dropout) = 0.0; + + /// add bias as module parameter. Default: true. + TORCH_ARG(bool, bias) = true; + + /// add bias to the key and value sequences at dim=0. + TORCH_ARG(bool, add_bias_kv) = false; + + /// add a new batch of zeros to the key and value sequences at dim=1. + TORCH_ARG(bool, add_zero_attn) = false; + + /// total number of features in key. Default: c10::nullopt. + TORCH_ARG(int64_t, kdim); + + /// total number of features in key. Default: c10::nullopt. 
+ TORCH_ARG(int64_t, vdim); +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::multi_head_attention_forward` +struct TORCH_API MultiheadAttentionForwardFuncOptions { + MultiheadAttentionForwardFuncOptions( + int64_t embed_dim_to_check, + int64_t num_heads, + Tensor in_proj_weight, + Tensor in_proj_bias, + Tensor bias_k, + Tensor bias_v, + bool add_zero_attn, + double dropout_p, + Tensor out_proj_weight, + Tensor out_proj_bias); + + TORCH_ARG(int64_t, embed_dim_to_check); + + TORCH_ARG(int64_t, num_heads); + + TORCH_ARG(Tensor, in_proj_weight); + + TORCH_ARG(Tensor, in_proj_bias); + + TORCH_ARG(Tensor, bias_k); + + TORCH_ARG(Tensor, bias_v); + + TORCH_ARG(bool, add_zero_attn); + + TORCH_ARG(double, dropout_p); + + TORCH_ARG(Tensor, out_proj_weight); + + TORCH_ARG(Tensor, out_proj_bias); + + TORCH_ARG(bool, training) = true; + + TORCH_ARG(Tensor, key_padding_mask) = {}; + + TORCH_ARG(bool, need_weights) = true; + + TORCH_ARG(Tensor, attn_mask) = {}; + + TORCH_ARG(bool, use_separate_proj_weight) = false; + + TORCH_ARG(Tensor, q_proj_weight) = {}; + + TORCH_ARG(Tensor, k_proj_weight) = {}; + + TORCH_ARG(Tensor, v_proj_weight) = {}; + + TORCH_ARG(Tensor, static_k) = {}; + + TORCH_ARG(Tensor, static_v) = {}; + + TORCH_ARG(bool, average_attn_weights) = true; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h new file mode 100644 index 0000000000000000000000000000000000000000..d4754747a1d296b878231851af9d1b4caee94ab4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include + +namespace torch { 
+namespace nn { + +/// Options for the `AdaptiveLogSoftmaxWithLoss` module. +/// +/// Example: +/// ``` +/// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10, +/// {4, 8}).div_value(2.).head_bias(true)); +/// ``` +struct TORCH_API AdaptiveLogSoftmaxWithLossOptions { + /* implicit */ AdaptiveLogSoftmaxWithLossOptions( + int64_t in_features, + int64_t n_classes, + std::vector cutoffs); + + /// Number of features in the input tensor + TORCH_ARG(int64_t, in_features); + + /// Number of classes in the dataset + TORCH_ARG(int64_t, n_classes); + + /// Cutoffs used to assign targets to their buckets + TORCH_ARG(std::vector, cutoffs); + + /// value used as an exponent to compute sizes of the clusters. Default: 4.0 + TORCH_ARG(double, div_value) = 4.; + + /// If ``true``, adds a bias term to the 'head' of + /// the adaptive softmax. Default: false + TORCH_ARG(bool, head_bias) = false; +}; + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h new file mode 100644 index 0000000000000000000000000000000000000000..cd2d7f164203e89545bd6fe63e25eded31b779bd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h @@ -0,0 +1,95 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `BatchNorm` module. +struct TORCH_API BatchNormOptions { + /* implicit */ BatchNormOptions(int64_t num_features); + + /// The number of features of the input tensor. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, num_features); + + /// The epsilon value added for numerical stability. + /// Changing this parameter after construction __is effective__. 
+ TORCH_ARG(double, eps) = 1e-5; + + /// A momentum multiplier for the mean and variance. + /// Changing this parameter after construction __is effective__. + TORCH_ARG(c10::optional, momentum) = 0.1; + + /// Whether to learn a scale and bias that are applied in an affine + /// transformation on the input. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, affine) = true; + + /// Whether to store and update batch statistics (mean and variance) in the + /// module. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, track_running_stats) = true; +}; + +/// Options for the `BatchNorm1d` module. +/// +/// Example: +/// ``` +/// BatchNorm1d +/// model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +using BatchNorm1dOptions = BatchNormOptions; + +/// Options for the `BatchNorm2d` module. +/// +/// Example: +/// ``` +/// BatchNorm2d +/// model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +using BatchNorm2dOptions = BatchNormOptions; + +/// Options for the `BatchNorm3d` module. +/// +/// Example: +/// ``` +/// BatchNorm3d +/// model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +using BatchNorm3dOptions = BatchNormOptions; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::batch_norm`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::batch_norm(input, mean, variance, +/// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false)); +/// ``` +struct TORCH_API BatchNormFuncOptions { + TORCH_ARG(Tensor, weight) = Tensor(); + + TORCH_ARG(Tensor, bias) = Tensor(); + + TORCH_ARG(bool, training) = false; + + /// A momentum multiplier for the mean and variance. 
+ /// Changing this parameter after construction __is effective__. + TORCH_ARG(c10::optional, momentum) = 0.1; + + /// The epsilon value added for numerical stability. + /// Changing this parameter after construction __is effective__. + TORCH_ARG(double, eps) = 1e-5; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/conv.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/conv.h new file mode 100644 index 0000000000000000000000000000000000000000..0b5b5b1b3f955abf2ec36a806fb1344611c4f060 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/conv.h @@ -0,0 +1,415 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +namespace detail { + +typedef std::variant< + enumtype::kZeros, + enumtype::kReflect, + enumtype::kReplicate, + enumtype::kCircular> + conv_padding_mode_t; + +template +using conv_padding_t = + std::variant, enumtype::kValid, enumtype::kSame>; + +/// Options for a `D`-dimensional convolution or convolution transpose module. +template +struct ConvNdOptions { + using padding_t = conv_padding_t; + ConvNdOptions( + int64_t in_channels, + int64_t out_channels, + ExpandingArray kernel_size) + : in_channels_(in_channels), + out_channels_(out_channels), + kernel_size_(std::move(kernel_size)) {} + + /// The number of channels the input volumes will have. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, in_channels); + + /// The number of output channels the convolution should produce. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, out_channels); + + /// The kernel size to use. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. 
+ /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, kernel_size); + + /// The stride of the convolution. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// The padding to add to the input volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(padding_t, padding) = 0; + + public: + decltype(auto) padding(std::initializer_list il) { + return padding(IntArrayRef{il}); + } + + /// The kernel dilation. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// If true, convolutions will be transpose convolutions (a.k.a. + /// deconvolutions). + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, transposed) = false; + + /// For transpose convolutions, the padding to add to output volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, output_padding) = 0; + + /// The number of convolution groups. + /// This parameter __can__ be changed after construction. + TORCH_ARG(int64_t, groups) = 1; + + /// Whether to add a bias after individual applications of the kernel. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, bias) = true; + + /// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or + /// `torch::kCircular`. 
Default: `torch::kZeros` + TORCH_ARG(conv_padding_mode_t, padding_mode) = torch::kZeros; +}; + +} // namespace detail + +// ============================================================================ + +/// Options for a `D`-dimensional convolution module. +template +struct ConvOptions { + using padding_mode_t = detail::conv_padding_mode_t; + using padding_t = detail::conv_padding_t; + + ConvOptions( + int64_t in_channels, + int64_t out_channels, + ExpandingArray kernel_size) + : in_channels_(in_channels), + out_channels_(out_channels), + kernel_size_(std::move(kernel_size)) {} + + /// The number of channels the input volumes will have. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, in_channels); + + /// The number of output channels the convolution should produce. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, out_channels); + + /// The kernel size to use. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, kernel_size); + + /// The stride of the convolution. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// The padding to add to the input volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(padding_t, padding) = 0; + + public: + decltype(auto) padding(std::initializer_list il) { + return padding(IntArrayRef{il}); + } + + /// The kernel dilation. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// The number of convolution groups. 
+ /// This parameter __can__ be changed after construction. + TORCH_ARG(int64_t, groups) = 1; + + /// Whether to add a bias after individual applications of the kernel. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, bias) = true; + + /// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or + /// `torch::kCircular`. Default: `torch::kZeros` + TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros; +}; + +/// `ConvOptions` specialized for the `Conv1d` module. +/// +/// Example: +/// ``` +/// Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false)); +/// ``` +using Conv1dOptions = ConvOptions<1>; + +/// `ConvOptions` specialized for the `Conv2d` module. +/// +/// Example: +/// ``` +/// Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false)); +/// ``` +using Conv2dOptions = ConvOptions<2>; + +/// `ConvOptions` specialized for the `Conv3d` module. +/// +/// Example: +/// ``` +/// Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false)); +/// ``` +using Conv3dOptions = ConvOptions<3>; + +// ============================================================================ + +namespace functional { + +/// Options for a `D`-dimensional convolution functional. +template +struct ConvFuncOptions { + using padding_t = torch::nn::detail::conv_padding_t; + + /// optional bias of shape `(out_channels)`. Default: ``None`` + TORCH_ARG(torch::Tensor, bias) = Tensor(); + + /// The stride of the convolving kernel. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// Implicit paddings on both sides of the input. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(padding_t, padding) = 0; + + public: + decltype(auto) padding(std::initializer_list il) { + return padding(IntArrayRef{il}); + } + + /// The spacing between kernel elements. 
+ /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// Split input into groups, `in_channels` should be divisible by + /// the number of groups. + TORCH_ARG(int64_t, groups) = 1; +}; + +/// `ConvFuncOptions` specialized for `torch::nn::functional::conv1d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1)); +/// ``` +using Conv1dFuncOptions = ConvFuncOptions<1>; + +/// `ConvFuncOptions` specialized for `torch::nn::functional::conv2d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1)); +/// ``` +using Conv2dFuncOptions = ConvFuncOptions<2>; + +/// `ConvFuncOptions` specialized for `torch::nn::functional::conv3d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1)); +/// ``` +using Conv3dFuncOptions = ConvFuncOptions<3>; + +} // namespace functional + +// ============================================================================ + +template +struct ConvTransposeOptions { + using padding_mode_t = detail::conv_padding_mode_t; + + ConvTransposeOptions( + int64_t in_channels, + int64_t out_channels, + ExpandingArray kernel_size) + : in_channels_(in_channels), + out_channels_(out_channels), + kernel_size_(std::move(kernel_size)) {} + + /// The number of channels the input volumes will have. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, in_channels); + + /// The number of output channels the convolution should produce. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, out_channels); + + /// The kernel size to use. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. 
+ TORCH_ARG(ExpandingArray, kernel_size); + + /// The stride of the convolution. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// The padding to add to the input volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, padding) = 0; + + /// For transpose convolutions, the padding to add to output volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, output_padding) = 0; + + /// The number of convolution groups. + /// This parameter __can__ be changed after construction. + TORCH_ARG(int64_t, groups) = 1; + + /// Whether to add a bias after individual applications of the kernel. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, bias) = true; + + /// The kernel dilation. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or + /// `torch::kCircular`. Default: `torch::kZeros` + TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros; +}; + +/// `ConvTransposeOptions` specialized for the `ConvTranspose1d` module. +/// +/// Example: +/// ``` +/// ConvTranspose1d model(ConvTranspose1dOptions(3, 2, +/// 3).stride(1).bias(false)); +/// ``` +using ConvTranspose1dOptions = ConvTransposeOptions<1>; + +/// `ConvTransposeOptions` specialized for the `ConvTranspose2d` module. 
+/// +/// Example: +/// ``` +/// ConvTranspose2d model(ConvTranspose2dOptions(3, 2, +/// 3).stride(1).bias(false)); +/// ``` +using ConvTranspose2dOptions = ConvTransposeOptions<2>; + +/// `ConvTransposeOptions` specialized for the `ConvTranspose3d` module. +/// +/// Example: +/// ``` +/// ConvTranspose3d model(ConvTranspose3dOptions(2, 2, +/// 2).stride(1).bias(false)); +/// ``` +using ConvTranspose3dOptions = ConvTransposeOptions<3>; + +// ============================================================================ + +namespace functional { + +/// Options for a `D`-dimensional convolution functional. +template +struct ConvTransposeFuncOptions { + /// optional bias of shape `(out_channels)`. Default: ``None`` + TORCH_ARG(torch::Tensor, bias) = Tensor(); + + /// The stride of the convolving kernel. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// Implicit paddings on both sides of the input. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, padding) = 0; + + /// Additional size added to one side of each dimension in the output shape. + /// Default: 0 + TORCH_ARG(ExpandingArray, output_padding) = 0; + + /// Split input into groups, `in_channels` should be divisible by + /// the number of groups. + TORCH_ARG(int64_t, groups) = 1; + + /// The spacing between kernel elements. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, dilation) = 1; +}; + +/// `ConvTransposeFuncOptions` specialized for +/// `torch::nn::functional::conv_transpose1d`. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1)); +/// ``` +using ConvTranspose1dFuncOptions = ConvTransposeFuncOptions<1>; + +/// `ConvTransposeFuncOptions` specialized for +/// `torch::nn::functional::conv_transpose2d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1)); +/// ``` +using ConvTranspose2dFuncOptions = ConvTransposeFuncOptions<2>; + +/// `ConvTransposeFuncOptions` specialized for +/// `torch::nn::functional::conv_transpose3d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1)); +/// ``` +using ConvTranspose3dFuncOptions = ConvTransposeFuncOptions<3>; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h new file mode 100644 index 0000000000000000000000000000000000000000..654cd6626498db19fe1ec548e1d10c48cfeb390f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `CosineSimilarity` module. +/// +/// Example: +/// ``` +/// CosineSimilarity model(CosineSimilarityOptions().dim(0).eps(0.5)); +/// ``` +struct TORCH_API CosineSimilarityOptions { + /// Dimension where cosine similarity is computed. Default: 1 + TORCH_ARG(int64_t, dim) = 1; + /// Small value to avoid division by zero. Default: 1e-8 + TORCH_ARG(double, eps) = 1e-8; +}; + +namespace functional { +/// Options for `torch::nn::functional::cosine_similarity`. 
+/// +/// See the documentation for `torch::nn::CosineSimilarityOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cosine_similarity(input1, input2, +/// F::CosineSimilarityFuncOptions().dim(1)); +/// ``` +using CosineSimilarityFuncOptions = CosineSimilarityOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `PairwiseDistance` module. +/// +/// Example: +/// ``` +/// PairwiseDistance +/// model(PairwiseDistanceOptions().p(3).eps(0.5).keepdim(true)); +/// ``` +struct TORCH_API PairwiseDistanceOptions { + /// The norm degree. Default: 2 + TORCH_ARG(double, p) = 2.0; + /// Small value to avoid division by zero. Default: 1e-6 + TORCH_ARG(double, eps) = 1e-6; + /// Determines whether or not to keep the vector dimension. Default: false + TORCH_ARG(bool, keepdim) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::pairwise_distance`. +/// +/// See the documentation for `torch::nn::PairwiseDistanceOptions` class to +/// learn what arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1)); +/// ``` +using PairwiseDistanceFuncOptions = PairwiseDistanceOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h new file mode 100644 index 0000000000000000000000000000000000000000..7f41f5672382c9e612b8c6f29c4ac261f02e328c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h @@ -0,0 +1,130 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Dropout` module. +/// +/// Example: +/// ``` +/// Dropout model(DropoutOptions().p(0.42).inplace(true)); +/// ``` +struct TORCH_API DropoutOptions { + /* implicit */ DropoutOptions(double p = 0.5); + + /// The probability of an element to be zeroed. Default: 0.5 + TORCH_ARG(double, p) = 0.5; + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace) = false; +}; + +/// Options for the `Dropout2d` module. +/// +/// Example: +/// ``` +/// Dropout2d model(Dropout2dOptions().p(0.42).inplace(true)); +/// ``` +using Dropout2dOptions = DropoutOptions; + +/// Options for the `Dropout3d` module. +/// +/// Example: +/// ``` +/// Dropout3d model(Dropout3dOptions().p(0.42).inplace(true)); +/// ``` +using Dropout3dOptions = DropoutOptions; + +/// Options for the `AlphaDropout` module. +/// +/// Example: +/// ``` +/// AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true)); +/// ``` +using AlphaDropoutOptions = DropoutOptions; + +/// Options for the `FeatureAlphaDropout` module. 
+/// +/// Example: +/// ``` +/// FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true)); +/// ``` +using FeatureAlphaDropoutOptions = DropoutOptions; + +namespace functional { + +/// Options for `torch::nn::functional::dropout`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout(input, F::DropoutFuncOptions().p(0.5)); +/// ``` +struct TORCH_API DropoutFuncOptions { + /// The probability of an element to be zeroed. Default: 0.5 + TORCH_ARG(double, p) = 0.5; + + TORCH_ARG(bool, training) = true; + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace) = false; +}; + +/// Options for `torch::nn::functional::dropout2d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5)); +/// ``` +using Dropout2dFuncOptions = DropoutFuncOptions; + +/// Options for `torch::nn::functional::dropout3d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5)); +/// ``` +using Dropout3dFuncOptions = DropoutFuncOptions; + +/// Options for `torch::nn::functional::alpha_dropout`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::alpha_dropout(input, +/// F::AlphaDropoutFuncOptions().p(0.5).training(false)); +/// ``` +struct TORCH_API AlphaDropoutFuncOptions { + TORCH_ARG(double, p) = 0.5; + + TORCH_ARG(bool, training) = false; + + TORCH_ARG(bool, inplace) = false; +}; + +/// Options for `torch::nn::functional::feature_alpha_dropout`. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::feature_alpha_dropout(input, +/// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false)); +/// ``` +struct TORCH_API FeatureAlphaDropoutFuncOptions { + TORCH_ARG(double, p) = 0.5; + + TORCH_ARG(bool, training) = false; + + TORCH_ARG(bool, inplace) = false; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h new file mode 100644 index 0000000000000000000000000000000000000000..d8d06716308e138559a04ec7804b226c768cf3e1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h @@ -0,0 +1,242 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Embedding` module. +/// +/// Example: +/// ``` +/// Embedding model(EmbeddingOptions(10, +/// 2).padding_idx(3).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true)); +/// ``` +struct TORCH_API EmbeddingOptions { + EmbeddingOptions(int64_t num_embeddings, int64_t embedding_dim); + + /// The size of the dictionary of embeddings. + TORCH_ARG(int64_t, num_embeddings); + /// The size of each embedding vector. + TORCH_ARG(int64_t, embedding_dim); + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at `padding_idx` is not updated + /// during training, i.e. it remains as a fixed "pad". For a newly constructed + /// Embedding, the embedding vector at `padding_idx` will default to all + /// zeros, but can be updated to another value to be used as the padding + /// vector. 
+ TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + TORCH_ARG(bool, sparse) = false; + /// The learnable weights of the module of shape (num_embeddings, + /// embedding_dim) + TORCH_ARG(torch::Tensor, _weight) = Tensor(); +}; + +// ============================================================================ + +/// Options for the `Embedding::from_pretrained` function. +struct TORCH_API EmbeddingFromPretrainedOptions { + /// If ``true``, the tensor does not get updated in the learning process. + /// Equivalent to ``embedding.weight.requires_grad_(false)``. Default: + /// ``true`` + TORCH_ARG(bool, freeze) = true; + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at `padding_idx` is not updated + /// during training, i.e. it remains as a fixed "pad". + TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. 
+ TORCH_ARG(bool, sparse) = false; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::embedding`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::embedding(input, weight, +/// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true)); +/// ``` +struct TORCH_API EmbeddingFuncOptions { + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at `padding_idx` is not updated + /// during training, i.e. it remains as a fixed "pad". + TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + TORCH_ARG(bool, sparse) = false; +}; + +} // namespace functional + +// ============================================================================ + +typedef std::variant + EmbeddingBagMode; + +/// Options for the `EmbeddingBag` module. +/// +/// Example: +/// ``` +/// EmbeddingBag model(EmbeddingBagOptions(10, +/// 2).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true).mode(torch::kSum)); +/// ``` +struct TORCH_API EmbeddingBagOptions { + EmbeddingBagOptions(int64_t num_embeddings, int64_t embedding_dim); + + /// The size of the dictionary of embeddings. + TORCH_ARG(int64_t, num_embeddings); + /// The size of each embedding vector. 
+ TORCH_ARG(int64_t, embedding_dim); + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. Note: this option is not + /// supported when ``mode="kMax"``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the + /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights` + /// into consideration. ``"kMean"`` computes the average of the values in the + /// bag, ``"kMax"`` computes the max value over each bag. + TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + /// Note: this option is not supported when ``mode="kMax"``. + TORCH_ARG(bool, sparse) = false; + /// The learnable weights of the module of shape (num_embeddings, + /// embedding_dim) + TORCH_ARG(torch::Tensor, _weight) = Tensor(); + /// If ``true``, `offsets` has one additional element, where the last element + /// is equivalent to the size of `indices`. This matches the CSR format. + TORCH_ARG(bool, include_last_offset) = false; + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at padding_idx is not updated + /// during training, i.e. it remains as a fixed "pad". For a newly constructed + /// EmbeddingBag, the embedding vector at `padding_idx` will default to all + /// zeros, but can be updated to another value to be used as the padding + /// vector. Note that the embedding vector at `padding_idx` is excluded from + /// the reduction. 
+ TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; +}; + +// ============================================================================ + +/// Options for the `EmbeddingBag::from_pretrained` function. +struct TORCH_API EmbeddingBagFromPretrainedOptions { + /// If ``true``, the tensor does not get updated in the learning process. + /// Equivalent to ``embeddingbag.weight.requires_grad_(false)``. Default: + /// ``true`` + TORCH_ARG(bool, freeze) = true; + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. Note: this option is not + /// supported when ``mode="kMax"``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the + /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights` + /// into consideration. ``"kMean"`` computes the average of the values in the + /// bag, ``"kMax"`` computes the max value over each bag. + TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + /// Note: this option is not supported when ``mode="kMax"``. + TORCH_ARG(bool, sparse) = false; + /// If ``true``, `offsets` has one additional element, where the last element + /// is equivalent to the size of `indices`. This matches the CSR format. Note: + /// this option is currently only supported when ``mode="sum"``. + TORCH_ARG(bool, include_last_offset) = false; + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at padding_idx is not updated + /// during training, i.e. it remains as a fixed "pad". 
Note that the embedding + /// vector at `padding_idx` is excluded from the reduction. + TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::embedding_bag`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::embedding_bag(input, weight, +/// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets)); +/// ``` +struct TORCH_API EmbeddingBagFuncOptions { + /// Only used when `input` is 1D. `offsets` determines + /// the starting index position of each bag (sequence) in `input`. + TORCH_ARG(torch::Tensor, offsets) = Tensor(); + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. Note: this option is not + /// supported when ``mode="kMax"``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the + /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights` + /// into consideration. ``"kMean"`` computes the average of the values in the + /// bag, ``"kMax"`` computes the max value over each bag. + TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + /// Note: this option is not supported when ``mode="kMax"``. + TORCH_ARG(bool, sparse) = false; + /// a tensor of float / double weights, or None to indicate all weights should + /// be taken to be 1. 
If specified, `per_sample_weights` must have exactly the + /// same shape as input and is treated as having the same `offsets`, if those + /// are not None. + TORCH_ARG(torch::Tensor, per_sample_weights) = Tensor(); + /// If ``true``, `offsets` has one additional element, where the last element + /// is equivalent to the size of `indices`. This matches the CSR format. Note: + /// this option is currently only supported when ``mode="sum"``. + TORCH_ARG(bool, include_last_offset) = false; + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at padding_idx is not updated + /// during training, i.e. it remains as a fixed "pad". Note that the embedding + /// vector at `padding_idx` is excluded from the reduction. + TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h new file mode 100644 index 0000000000000000000000000000000000000000..21c24bff845acfcf94724c2b0dc717b7ca250d2a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h @@ -0,0 +1,99 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Fold` module. +/// +/// Example: +/// ``` +/// Fold model(FoldOptions({8, 8}, {3, 3}).dilation(2).padding({2, +/// 1}).stride(2)); +/// ``` +struct TORCH_API FoldOptions { + FoldOptions(ExpandingArray<2> output_size, ExpandingArray<2> kernel_size) + : output_size_(std::move(output_size)), + kernel_size_(std::move(kernel_size)) {} + + /// describes the spatial shape of the large containing tensor of the sliding + /// local blocks. 
It is useful to resolve the ambiguity when multiple input + /// shapes map to same number of sliding blocks, e.g., with stride > 0. + TORCH_ARG(ExpandingArray<2>, output_size); + + /// the size of the sliding blocks + TORCH_ARG(ExpandingArray<2>, kernel_size); + + /// controls the spacing between the kernel points; also known as the à trous + /// algorithm. + TORCH_ARG(ExpandingArray<2>, dilation) = 1; + + /// controls the amount of implicit zero-paddings on both sides for padding + /// number of points for each dimension before reshaping. + TORCH_ARG(ExpandingArray<2>, padding) = 0; + + /// controls the stride for the sliding blocks. + TORCH_ARG(ExpandingArray<2>, stride) = 1; +}; + +namespace functional { +/// Options for `torch::nn::functional::fold`. +/// +/// See the documentation for `torch::nn::FoldOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2})); +/// ``` +using FoldFuncOptions = FoldOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `Unfold` module. +/// +/// Example: +/// ``` +/// Unfold model(UnfoldOptions({2, 4}).dilation(2).padding({2, 1}).stride(2)); +/// ``` +struct TORCH_API UnfoldOptions { + UnfoldOptions(ExpandingArray<2> kernel_size) + : kernel_size_(std::move(kernel_size)) {} + + /// the size of the sliding blocks + TORCH_ARG(ExpandingArray<2>, kernel_size); + + /// controls the spacing between the kernel points; also known as the à trous + /// algorithm. + TORCH_ARG(ExpandingArray<2>, dilation) = 1; + + /// controls the amount of implicit zero-paddings on both sides for padding + /// number of points for each dimension before reshaping. + TORCH_ARG(ExpandingArray<2>, padding) = 0; + + /// controls the stride for the sliding blocks. 
+ TORCH_ARG(ExpandingArray<2>, stride) = 1; +}; + +namespace functional { +/// Options for `torch::nn::functional::unfold`. +/// +/// See the documentation for `torch::nn::UnfoldOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2)); +/// ``` +using UnfoldFuncOptions = UnfoldOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h new file mode 100644 index 0000000000000000000000000000000000000000..d93e10d0c95a23befcfe0318ebc4ddcd96aa5e71 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h @@ -0,0 +1,89 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `InstanceNorm` module. +struct TORCH_API InstanceNormOptions { + /* implicit */ InstanceNormOptions(int64_t num_features); + + /// The number of features of the input tensor. + TORCH_ARG(int64_t, num_features); + + /// The epsilon value added for numerical stability. + TORCH_ARG(double, eps) = 1e-5; + + /// A momentum multiplier for the mean and variance. + TORCH_ARG(double, momentum) = 0.1; + + /// Whether to learn a scale and bias that are applied in an affine + /// transformation on the input. + TORCH_ARG(bool, affine) = false; + + /// Whether to store and update batch statistics (mean and variance) in the + /// module. + TORCH_ARG(bool, track_running_stats) = false; +}; + +/// Options for the `InstanceNorm1d` module. 
+/// +/// Example: +/// ``` +/// InstanceNorm1d +/// model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +using InstanceNorm1dOptions = InstanceNormOptions; + +/// Options for the `InstanceNorm2d` module. +/// +/// Example: +/// ``` +/// InstanceNorm2d +/// model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +using InstanceNorm2dOptions = InstanceNormOptions; + +/// Options for the `InstanceNorm3d` module. +/// +/// Example: +/// ``` +/// InstanceNorm3d +/// model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +using InstanceNorm3dOptions = InstanceNormOptions; + +namespace functional { + +/// Options for `torch::nn::functional::instance_norm`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::instance_norm(input, +/// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5)); +/// ``` +struct TORCH_API InstanceNormFuncOptions { + TORCH_ARG(Tensor, running_mean) = Tensor(); + + TORCH_ARG(Tensor, running_var) = Tensor(); + + TORCH_ARG(Tensor, weight) = Tensor(); + + TORCH_ARG(Tensor, bias) = Tensor(); + + TORCH_ARG(bool, use_input_stats) = true; + + TORCH_ARG(double, momentum) = 0.1; + + TORCH_ARG(double, eps) = 1e-5; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h new file mode 100644 index 0000000000000000000000000000000000000000..5952d97806b378814f8bb0c1ffa6cf783d2f8426 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h @@ -0,0 +1,95 @@ +#pragma once + +#include +#include +#include + +namespace torch 
{ +namespace nn { + +/// Options for the `Linear` module. +/// +/// Example: +/// ``` +/// Linear model(LinearOptions(5, 2).bias(false)); +/// ``` +struct TORCH_API LinearOptions { + LinearOptions(int64_t in_features, int64_t out_features); + /// size of each input sample + TORCH_ARG(int64_t, in_features); + + /// size of each output sample + TORCH_ARG(int64_t, out_features); + + /// If set to false, the layer will not learn an additive bias. Default: true + TORCH_ARG(bool, bias) = true; +}; + +// ============================================================================ + +/// Options for the `Flatten` module. +/// +/// Example: +/// ``` +/// Flatten model(FlattenOptions().start_dim(2).end_dim(4)); +/// ``` +struct TORCH_API FlattenOptions { + /// first dim to flatten + TORCH_ARG(int64_t, start_dim) = 1; + /// last dim to flatten + TORCH_ARG(int64_t, end_dim) = -1; +}; + +// ============================================================================ + +/// Options for the `Unflatten` module. +/// +/// Note: If input tensor is named, use dimname and namedshape arguments. +/// +/// Example: +/// ``` +/// Unflatten unnamed_model(UnflattenOptions(0, {2, 2})); +/// Unflatten named_model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}})); +/// ``` +struct TORCH_API UnflattenOptions { + typedef std::vector> namedshape_t; + + UnflattenOptions(int64_t dim, std::vector sizes); + UnflattenOptions(const char* dimname, namedshape_t namedshape); + UnflattenOptions(std::string dimname, namedshape_t namedshape); + + /// dim to unflatten + TORCH_ARG(int64_t, dim); + /// name of dim to unflatten, for use with named tensors + TORCH_ARG(std::string, dimname); + /// new shape of unflattened dim + TORCH_ARG(std::vector, sizes); + /// new shape of unflattened dim with names, for use with named tensors + TORCH_ARG(namedshape_t, namedshape); +}; + +// ============================================================================ + +/// Options for the `Bilinear` module. 
+/// +/// Example: +/// ``` +/// Bilinear model(BilinearOptions(3, 2, 4).bias(false)); +/// ``` +struct TORCH_API BilinearOptions { + BilinearOptions( + int64_t in1_features, + int64_t in2_features, + int64_t out_features); + /// The number of features in input 1 (columns of the input1 matrix). + TORCH_ARG(int64_t, in1_features); + /// The number of features in input 2 (columns of the input2 matrix). + TORCH_ARG(int64_t, in2_features); + /// The number of output features to produce (columns of the output matrix). + TORCH_ARG(int64_t, out_features); + /// Whether to learn and add a bias after the bilinear transformation. + TORCH_ARG(bool, bias) = true; +}; + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h new file mode 100644 index 0000000000000000000000000000000000000000..c9eb2b66f3e0b2122639f6354dadf539819efc48 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h @@ -0,0 +1,802 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `L1Loss` module. +/// +/// Example: +/// ``` +/// L1Loss model(L1LossOptions(torch::kNone)); +/// ``` +struct TORCH_API L1LossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3(L1LossOptions, reduction, kNone, kMean, kSum) + + /// Specifies the reduction to apply to the output. + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::l1_loss`. +/// +/// See the documentation for `torch::nn::L1LossOptions` class to learn what +/// arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone)); +/// ``` +using L1LossFuncOptions = L1LossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `KLDivLoss` module. +/// +/// Example: +/// ``` +/// KLDivLoss +/// model(KLDivLossOptions().reduction(torch::kNone).log_target(false)); +/// ``` +struct TORCH_API KLDivLossOptions { + typedef std::variant< + enumtype::kNone, + enumtype::kBatchMean, + enumtype::kSum, + enumtype::kMean> + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG4( + KLDivLossOptions, + reduction, + kNone, + kBatchMean, + kSum, + kMean) + + /// Specifies the reduction to apply to the output. + /// ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. Default: ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; + + /// Specifies whether `target` is accepted in the log space. Default: False + TORCH_ARG(bool, log_target) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::kl_div`. +/// +/// See the documentation for `torch::nn::KLDivLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::kl_div(input, target, +/// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false)); +/// ``` +using KLDivFuncOptions = KLDivLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MSELoss` module. +/// +/// Example: +/// ``` +/// MSELoss model(MSELossOptions(torch::kNone)); +/// ``` +struct TORCH_API MSELossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3(MSELossOptions, reduction, kNone, kMean, kSum) + + /// Specifies the reduction to apply to the output. + /// ``'none'`` | ``'mean'`` | ``'sum'``. 
Default: ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::mse_loss`. +/// +/// See the documentation for `torch::nn::MSELossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone)); +/// ``` +using MSELossFuncOptions = MSELossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `BCELoss` module. +/// +/// Example: +/// ``` +/// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API BCELossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to the loss of each batch element. + TORCH_ARG(Tensor, weight) = {}; + /// Specifies the reduction to apply to the output. + /// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::binary_cross_entropy`. +/// +/// See the documentation for `torch::nn::BCELossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy(input, target, +/// F::BinaryCrossEntropyFuncOptions().weight(weight)); +/// ``` +using BinaryCrossEntropyFuncOptions = BCELossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `HingeEmbeddingLoss` module. 
+/// +/// Example: +/// ``` +/// HingeEmbeddingLoss +/// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone)); +/// ``` +struct TORCH_API HingeEmbeddingLossOptions { + typedef std::variant + reduction_t; + + /// Specifies the threshold for which the distance of a negative sample must + /// reach in order to incur zero loss. Default: 1 + TORCH_ARG(double, margin) = 1.0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::hinge_embedding_loss`. +/// +/// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hinge_embedding_loss(input, target, +/// F::HingeEmbeddingLossFuncOptions().margin(2)); +/// ``` +using HingeEmbeddingLossFuncOptions = HingeEmbeddingLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiMarginLoss` module. +/// +/// Example: +/// ``` +/// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight)); +/// ``` +struct TORCH_API MultiMarginLossOptions { + typedef std::variant + reduction_t; + + /// Has a default value of :math:`1`. :math:`1` and :math:`2` + /// are the only supported values. + TORCH_ARG(int64_t, p) = 1; + /// Has a default value of :math:`1`. + TORCH_ARG(double, margin) = 1.0; + /// A manual rescaling weight given to each + /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is + /// treated as if having all ones. + TORCH_ARG(Tensor, weight) = Tensor(); + /// Specifies the reduction to apply to the output: + /// ``'none'`` | ``'mean'`` | ``'sum'``. 
``'none'``: no reduction will be + /// applied, + /// ``'mean'``: the sum of the output will be divided by the number of + /// elements in the output, ``'sum'``: the output will be summed. Default: + /// ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::multi_margin_loss`. +/// +/// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multi_margin_loss(input, target, +/// F::MultiMarginLossFuncOptions().margin(2).weight(weight)); +/// ``` +using MultiMarginLossFuncOptions = MultiMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CosineEmbeddingLoss` module. +/// +/// Example: +/// ``` +/// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5)); +/// ``` +struct TORCH_API CosineEmbeddingLossOptions { + typedef std::variant + reduction_t; + + /// Specifies the threshold for which the distance of a negative sample must + /// reach in order to incur zero loss. Should be a number from -1 to 1, 0 + /// to 0.5 is suggested. Default: 0.0 + TORCH_ARG(double, margin) = 0.0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::cosine_embedding_loss`. +/// +/// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to +/// learn what arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cosine_embedding_loss(input1, input2, target, +/// F::CosineEmbeddingLossFuncOptions().margin(0.5)); +/// ``` +using CosineEmbeddingLossFuncOptions = CosineEmbeddingLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiLabelMarginLoss` module. +/// +/// Example: +/// ``` +/// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone)); +/// ``` +struct TORCH_API MultiLabelMarginLossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + MultiLabelMarginLossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::multilabel_margin_loss`. +/// +/// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_margin_loss(input, target, +/// F::MultilabelMarginLossFuncOptions(torch::kNone)); +/// ``` +using MultilabelMarginLossFuncOptions = MultiLabelMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `SoftMarginLoss` module. 
+/// +/// Example: +/// ``` +/// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone)); +/// ``` +struct TORCH_API SoftMarginLossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + SoftMarginLossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::soft_margin_loss`. +/// +/// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::soft_margin_loss(input, target, +/// F::SoftMarginLossFuncOptions(torch::kNone)); +/// ``` +using SoftMarginLossFuncOptions = SoftMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiLabelSoftMarginLoss` module. +/// +/// Example: +/// ``` +/// MultiLabelSoftMarginLoss +/// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API MultiLabelSoftMarginLossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to each + /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is + /// treated as if having all ones. + TORCH_ARG(Tensor, weight) = Tensor(); + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. 
Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::multilabel_soft_margin_loss`. +/// +/// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class +/// to learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_soft_margin_loss(input, target, +/// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight)); +/// ``` +using MultilabelSoftMarginLossFuncOptions = MultiLabelSoftMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `TripletMarginLoss` module. +/// +/// Example: +/// ``` +/// TripletMarginLoss +/// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false)); +/// ``` +struct TORCH_API TripletMarginLossOptions { + typedef std::variant + reduction_t; + + /// Specifies the threshold for which the distance of a negative sample must + /// reach in order to incur zero loss. Default: 1 + TORCH_ARG(double, margin) = 1.0; + /// Specifies the norm degree for pairwise distance. Default: 2 + TORCH_ARG(double, p) = 2.0; + TORCH_ARG(double, eps) = 1e-6; + /// The distance swap is described in detail in the paper Learning shallow + /// convolutional feature descriptors with triplet losses by V. Balntas, + /// E. Riba et al. Default: False + TORCH_ARG(bool, swap) = false; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::triplet_margin_loss`. +/// +/// See the documentation for `torch::nn::TripletMarginLossOptions` class to +/// learn what arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_loss(anchor, positive, negative, +/// F::TripletMarginLossFuncOptions().margin(1.0)); +/// ``` +using TripletMarginLossFuncOptions = TripletMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `TripletMarginWithDistanceLoss` module. +/// +/// Example: +/// ``` +/// TripletMarginWithDistanceLoss +/// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false)); +/// ``` +struct TORCH_API TripletMarginWithDistanceLossOptions { + typedef std::variant + reduction_t; + typedef std::function + distance_function_t; + + /// Specifies a nonnegative, real-valued function that quantifies the + /// closeness of two tensors. If not specified, `F::pairwise_distance` will + /// be used. Default: nullopt + TORCH_ARG(c10::optional, distance_function) = + c10::nullopt; + /// Specifies a nonnegative margin representing the minimum difference + /// between the positive and negative distances required for the loss to be 0. + /// Larger margins penalize cases where the negative examples are not distance + /// enough from the anchors, relative to the positives. Default: 1 + TORCH_ARG(double, margin) = 1.0; + /// Whether to use the distance swap described in the paper Learning shallow + /// convolutional feature descriptors with triplet losses by V. Balntas, + /// E. Riba et al. If True, and if the positive example is closer to the + /// negative example than the anchor is, swaps the positive example and the + /// anchor in the loss computation. Default: False + TORCH_ARG(bool, swap) = false; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::triplet_margin_with_distance_loss`. 
+/// +/// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions` +/// class to learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_with_distance_loss(anchor, positive, negative, +/// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0)); +/// ``` +using TripletMarginWithDistanceLossFuncOptions = + TripletMarginWithDistanceLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CTCLoss` module. +/// +/// Example: +/// ``` +/// CTCLoss +/// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum)); +/// ``` +struct TORCH_API CTCLossOptions { + typedef std::variant + reduction_t; + + /// blank label. Default `0`. + TORCH_ARG(int64_t, blank) = 0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Whether to zero infinite losses and the associated gradients. + /// Default: `false`. Infinite losses mainly occur when the inputs are + /// too short to be aligned to the targets. + TORCH_ARG(bool, zero_infinity) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::ctc_loss`. +/// +/// See the documentation for `torch::nn::CTCLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::ctc_loss(log_probs, targets, input_lengths, target_lengths, +/// F::CTCLossFuncOptions().reduction(torch::kNone)); +/// ``` +using CTCLossFuncOptions = CTCLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `SmoothL1Loss` module. 
+/// +/// Example: +/// ``` +/// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5)); +/// ``` +struct TORCH_API SmoothL1LossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + SmoothL1LossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Specifies the threshold at which to change between L1 and L2 loss. + /// If beta is not specified, a value of 1.0 will be used. + /// Default: nullopt + TORCH_ARG(c10::optional, beta) = c10::nullopt; +}; + +namespace functional { +/// Options for `torch::nn::functional::smooth_l1_loss`. +/// +/// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone)); +/// ``` +using SmoothL1LossFuncOptions = SmoothL1LossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `HuberLoss` module. +/// +/// Example: +/// ``` +/// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5)); +/// ``` +struct TORCH_API HuberLossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + HuberLossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. 
Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Specifies the threshold at which to change between L1 and L2 loss. + /// Default: 1.0 + TORCH_ARG(double, delta) = 1.0; +}; + +namespace functional { +/// Options for `torch::nn::functional::huber_loss`. +/// +/// See the documentation for `torch::nn::HuberLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::huber_loss(input, target, F::HuberLossFuncOptions(torch::kNone)); +/// ``` +using HuberLossFuncOptions = HuberLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `PoissonNLLLoss` module. +/// +/// Example: +/// ``` +/// PoissonNLLLoss +/// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum)); +/// ``` +struct TORCH_API PoissonNLLLossOptions { + typedef std::variant + reduction_t; + + /// if true the loss is computed as `exp(input) - target * input`, + /// if false the loss is `input - target * log(input + eps)`. + TORCH_ARG(bool, log_input) = true; + /// whether to compute full loss, i.e. to add the Stirling approximation term + /// target * log(target) - target + 0.5 * log(2 * pi * target). + TORCH_ARG(bool, full) = false; + /// Small value to avoid evaluation of `log(0)` when `log_input = false`. + /// Default: 1e-8 + TORCH_ARG(double, eps) = 1e-8; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::poisson_nll_loss`. +/// +/// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn +/// what arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::poisson_nll_loss(input, target, +/// F::PoissonNLLLossFuncOptions().reduction(torch::kNone)); +/// ``` +using PoissonNLLLossFuncOptions = PoissonNLLLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MarginRankingLoss` module. +/// +/// Example: +/// ``` +/// MarginRankingLoss +/// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum)); +/// ``` +struct TORCH_API MarginRankingLossOptions { + typedef std::variant + reduction_t; + + /// Has a default value of `0`. + TORCH_ARG(double, margin) = 0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::margin_ranking_loss`. +/// +/// See the documentation for `torch::nn::MarginRankingLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::margin_ranking_loss(input1, input2, target, +/// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum)); +/// ``` +using MarginRankingLossFuncOptions = MarginRankingLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `NLLLoss` module. +/// +/// Example: +/// ``` +/// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +struct TORCH_API NLLLossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to each + /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is + /// treated as if having all ones. + TORCH_ARG(Tensor, weight) = {}; + /// Specifies a target value that is ignored + /// and does not contribute to the input gradient. 
+ TORCH_ARG(int64_t, ignore_index) = -100; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::nll_loss`. +/// +/// See the documentation for `torch::nn::NLLLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::nll_loss(input, target, +/// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +using NLLLossFuncOptions = NLLLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CrossEntropyLoss` module. +/// +/// Example: +/// ``` +/// CrossEntropyLoss +/// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +struct TORCH_API CrossEntropyLossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to each class. If given, has to be a + /// Tensor of size C + TORCH_ARG(Tensor, weight) = {}; + /// Specifies a target value that is ignored + /// and does not contribute to the input gradient. + TORCH_ARG(int64_t, ignore_index) = -100; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Specifies the amount of smoothing when computing the loss. Default: 0.0 + TORCH_ARG(double, label_smoothing) = 0.0; +}; + +namespace functional { +/// Options for `torch::nn::functional::cross_entropy`. +/// +/// See the documentation for `torch::nn::CrossEntropyLossOptions` class to +/// learn what arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cross_entropy(input, target, +/// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +using CrossEntropyFuncOptions = CrossEntropyLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `BCEWithLogitsLoss` module. +/// +/// Example: +/// ``` +/// BCEWithLogitsLoss +/// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API BCEWithLogitsLossOptions { + typedef std::variant + reduction_t; + /// A manual rescaling weight given to the loss of each batch element. + /// If given, has to be a Tensor of size `nbatch`. + TORCH_ARG(Tensor, weight) = {}; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// A weight of positive examples. + /// Must be a vector with length equal to the number of classes. + TORCH_ARG(Tensor, pos_weight) = {}; +}; + +namespace functional { +/// Options for `torch::nn::functional::binary_cross_entropy_with_logits`. +/// +/// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to +/// learn what arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy_with_logits(input, target, +/// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum)); +/// ``` +using BinaryCrossEntropyWithLogitsFuncOptions = BCEWithLogitsLossOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h new file mode 100644 index 0000000000000000000000000000000000000000..ae8c206736d50dee245565ddb201346f527925d3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h @@ -0,0 +1,192 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `LayerNorm` module. +/// +/// Example: +/// ``` +/// LayerNorm model(LayerNormOptions({2, +/// 2}).elementwise_affine(false).eps(2e-5)); +/// ``` +struct TORCH_API LayerNormOptions { + /* implicit */ LayerNormOptions(std::vector normalized_shape); + /// input shape from an expected input. + TORCH_ARG(std::vector, normalized_shape); + /// a value added to the denominator for numerical stability. ``Default: + /// 1e-5``. + TORCH_ARG(double, eps) = 1e-5; + /// a boolean value that when set to ``true``, this module + /// has learnable per-element affine parameters initialized to ones (for + /// weights) and zeros (for biases). ``Default: true``. + TORCH_ARG(bool, elementwise_affine) = true; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::layer_norm`. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5)); +/// ``` +struct TORCH_API LayerNormFuncOptions { + /* implicit */ LayerNormFuncOptions(std::vector normalized_shape); + /// input shape from an expected input. + TORCH_ARG(std::vector, normalized_shape); + + TORCH_ARG(Tensor, weight) = {}; + + TORCH_ARG(Tensor, bias) = {}; + + /// a value added to the denominator for numerical stability. ``Default: + /// 1e-5``. + TORCH_ARG(double, eps) = 1e-5; +}; + +} // namespace functional + +// ============================================================================ + +/// Options for the `LocalResponseNorm` module. +/// +/// Example: +/// ``` +/// LocalResponseNorm +/// model(LocalResponseNormOptions(2).alpha(0.0002).beta(0.85).k(2.)); +/// ``` +struct TORCH_API LocalResponseNormOptions { + /* implicit */ LocalResponseNormOptions(int64_t size) : size_(size) {} + /// amount of neighbouring channels used for normalization + TORCH_ARG(int64_t, size); + + /// multiplicative factor. Default: 1e-4 + TORCH_ARG(double, alpha) = 1e-4; + + /// exponent. Default: 0.75 + TORCH_ARG(double, beta) = 0.75; + + /// additive factor. Default: 1 + TORCH_ARG(double, k) = 1.; +}; + +namespace functional { +/// Options for `torch::nn::functional::local_response_norm`. +/// +/// See the documentation for `torch::nn::LocalResponseNormOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2)); +/// ``` +using LocalResponseNormFuncOptions = LocalResponseNormOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CrossMapLRN2d` module. 
+/// +/// Example: +/// ``` +/// CrossMapLRN2d model(CrossMapLRN2dOptions(3).alpha(1e-5).beta(0.1).k(10)); +/// ``` +struct TORCH_API CrossMapLRN2dOptions { + CrossMapLRN2dOptions(int64_t size); + + TORCH_ARG(int64_t, size); + + TORCH_ARG(double, alpha) = 1e-4; + + TORCH_ARG(double, beta) = 0.75; + + TORCH_ARG(int64_t, k) = 1; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::normalize`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1)); +/// ``` +struct TORCH_API NormalizeFuncOptions { + /// The exponent value in the norm formulation. Default: 2.0 + TORCH_ARG(double, p) = 2.0; + /// The dimension to reduce. Default: 1 + TORCH_ARG(int64_t, dim) = 1; + /// Small value to avoid division by zero. Default: 1e-12 + TORCH_ARG(double, eps) = 1e-12; + /// the output tensor. If `out` is used, this + /// operation won't be differentiable. + TORCH_ARG(c10::optional, out) = c10::nullopt; +}; + +} // namespace functional + +// ============================================================================ + +/// Options for the `GroupNorm` module. +/// +/// Example: +/// ``` +/// GroupNorm model(GroupNormOptions(2, 2).eps(2e-5).affine(false)); +/// ``` +struct TORCH_API GroupNormOptions { + /* implicit */ GroupNormOptions(int64_t num_groups, int64_t num_channels); + + /// number of groups to separate the channels into + TORCH_ARG(int64_t, num_groups); + /// number of channels expected in input + TORCH_ARG(int64_t, num_channels); + /// a value added to the denominator for numerical stability. Default: 1e-5 + TORCH_ARG(double, eps) = 1e-5; + /// a boolean value that when set to ``true``, this module + /// has learnable per-channel affine parameters initialized to ones (for + /// weights) and zeros (for biases). Default: ``true``. 
+ TORCH_ARG(bool, affine) = true; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::group_norm`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5)); +/// ``` +struct TORCH_API GroupNormFuncOptions { + /* implicit */ GroupNormFuncOptions(int64_t num_groups); + + /// number of groups to separate the channels into + TORCH_ARG(int64_t, num_groups); + + TORCH_ARG(Tensor, weight) = {}; + + TORCH_ARG(Tensor, bias) = {}; + + /// a value added to the denominator for numerical stability. Default: 1e-5 + TORCH_ARG(double, eps) = 1e-5; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h new file mode 100644 index 0000000000000000000000000000000000000000..8b8312f78ee649bd4127da10dd906790878dbd71 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h @@ -0,0 +1,219 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for a `D`-dimensional ReflectionPad module. +template +struct TORCH_API ReflectionPadOptions { + ReflectionPadOptions(ExpandingArray padding) : padding_(padding) {} + + /// The size of the padding. + /// If it is `int`, uses the same padding in all boundaries. + /// If it is a 2-`tuple` (for ReflectionPad1d), uses (padding_left, + /// padding_right). If it is a 4-`tuple` (for ReflectionPad2d), uses + /// (padding_left, padding_right, padding_top, padding_bottom). 
If it is a + /// 6-`tuple` (for ReflectionPad3d), uses (padding_left, padding_right, + /// padding_top, padding_bottom, padding_front, padding_back). + + TORCH_ARG(ExpandingArray, padding); +}; + +/// `ReflectionPadOptions` specialized for the `ReflectionPad1d` module. +/// +/// Example: +/// ``` +/// ReflectionPad1d model(ReflectionPad1dOptions({3, 1})); +/// ``` +using ReflectionPad1dOptions = ReflectionPadOptions<1>; + +/// `ReflectionPadOptions` specialized for the `ReflectionPad2d` module. +/// +/// Example: +/// ``` +/// ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0})); +/// ``` +using ReflectionPad2dOptions = ReflectionPadOptions<2>; + +/// `ReflectionPadOptions` specialized for the `ReflectionPad3d` module. +/// +/// Example: +/// ``` +/// ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 1})); +/// ``` +using ReflectionPad3dOptions = ReflectionPadOptions<3>; + +// ============================================================================ + +/// Options for a `D`-dimensional ReplicationPad module. +template +struct TORCH_API ReplicationPadOptions { + ReplicationPadOptions(ExpandingArray padding) : padding_(padding) {} + + /// The size of the padding. + /// - If it is `int`, uses the same padding in all boundaries. + /// - If it is a 2-`tuple` (for ReplicationPad1d), uses (padding_left, + /// padding_right). + /// - If it is a 4-`tuple` (for ReplicationPad2d), uses (padding_left, + /// padding_right, padding_top, padding_bottom). + /// - If it is a 6-`tuple` (for ReplicationPad3d), uses + /// (padding_left, padding_right, padding_top, padding_bottom, + /// padding_front, padding_back). + TORCH_ARG(ExpandingArray, padding); +}; + +/// `ReplicationPadOptions` specialized for the `ReplicationPad1d` module. +/// +/// Example: +/// ``` +/// ReplicationPad1d model(ReplicationPad1dOptions({3, 1})); +/// ``` +using ReplicationPad1dOptions = ReplicationPadOptions<1>; + +/// `ReplicationPadOptions` specialized for the `ReplicationPad2d` module. 
+/// +/// Example: +/// ``` +/// ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0})); +/// ``` +using ReplicationPad2dOptions = ReplicationPadOptions<2>; + +/// `ReplicationPadOptions` specialized for the `ReplicationPad3d` module. +/// +/// Example: +/// ``` +/// ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2})); +/// ``` +using ReplicationPad3dOptions = ReplicationPadOptions<3>; + +// ============================================================================ + +template +struct TORCH_API ZeroPadOptions { + ZeroPadOptions(ExpandingArray padding) : padding_(padding) {} + + /// The size of the padding. + /// - If it is `int`, uses the same padding in all boundaries. + /// - If it is a 2-`tuple` (for ZeroPad1d), uses (padding_left, + /// padding_right). + /// - If it is a 4-`tuple` (for ZeroPad2d), uses (padding_left, padding_right, + /// padding_top, padding_bottom). + /// - If it is a 6-`tuple` (for ZeroPad3d), uses + /// (padding_left, padding_right, padding_top, padding_bottom, + /// padding_front, padding_back). + TORCH_ARG(ExpandingArray, padding); +}; + +/// `ZeroPadOptions` specialized for the `ZeroPad1d` module. +/// +/// Example: +/// ``` +/// ConstantPad1d model(ConstantPad1dOptions({3, 1}); +/// ``` +using ZeroPad1dOptions = ZeroPadOptions<1>; + +/// `ZeroPadOptions` specialized for the `ZeroPad2d` module. +/// +/// Example: +/// ``` +/// ConstantPad2d model(ConstantPad2dOptions({1, 1, 2, 0}); +/// ``` +using ZeroPad2dOptions = ZeroPadOptions<2>; + +/// `ZeroPadOptions` specialized for the `ZeroPad3d` module. +/// +/// Example: +/// ``` +/// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}); +/// ``` +using ZeroPad3dOptions = ZeroPadOptions<3>; + +// ============================================================================ + +/// Options for a `D`-dimensional ConstantPad module. 
+template +struct TORCH_API ConstantPadOptions { + ConstantPadOptions(ExpandingArray padding, double value) + : padding_(padding), value_(value) {} + + /// The size of the padding. + /// - If it is `int`, uses the same padding in all boundaries. + /// - If it is a 2-`tuple` (for ConstantPad1d), uses (padding_left, + /// padding_right). + /// - If it is a 4-`tuple` (for ConstantPad2d), uses (padding_left, + /// padding_right, padding_top, padding_bottom). + /// - If it is a 6-`tuple` (for ConstantPad3d), uses + /// (padding_left, padding_right, padding_top, padding_bottom, + /// padding_front, padding_back). + TORCH_ARG(ExpandingArray, padding); + + /// Fill value for constant padding. + TORCH_ARG(double, value); +}; + +/// `ConstantPadOptions` specialized for the `ConstantPad1d` module. +/// +/// Example: +/// ``` +/// ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5)); +/// ``` +using ConstantPad1dOptions = ConstantPadOptions<1>; + +/// `ConstantPadOptions` specialized for the `ConstantPad2d` module. +/// +/// Example: +/// ``` +/// ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5)); +/// ``` +using ConstantPad2dOptions = ConstantPadOptions<2>; + +/// `ConstantPadOptions` specialized for the `ConstantPad3d` module. +/// +/// Example: +/// ``` +/// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5)); +/// ``` +using ConstantPad3dOptions = ConstantPadOptions<3>; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::pad`. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1, +/// 2}).mode(torch::kReplicate)); +/// ``` +struct TORCH_API PadFuncOptions { + typedef std::variant< + enumtype::kConstant, + enumtype::kReflect, + enumtype::kReplicate, + enumtype::kCircular> + mode_t; + + PadFuncOptions(std::vector pad); + + /// m-elements tuple, where m/2 <= input dimensions and m is even. + TORCH_ARG(std::vector, pad); + + /// "constant", "reflect", "replicate" or "circular". Default: "constant" + TORCH_ARG(mode_t, mode) = torch::kConstant; + + /// fill value for "constant" padding. Default: 0 + TORCH_ARG(double, value) = 0; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h new file mode 100644 index 0000000000000000000000000000000000000000..859da98616db155e1729b2bbc5e37dab5c529a44 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `PixelShuffle` module. +/// +/// Example: +/// ``` +/// PixelShuffle model(PixelShuffleOptions(5)); +/// ``` +struct TORCH_API PixelShuffleOptions { + PixelShuffleOptions(int64_t upscale_factor) + : upscale_factor_(upscale_factor) {} + + /// Factor to increase spatial resolution by + TORCH_ARG(int64_t, upscale_factor); +}; + +/// Options for the `PixelUnshuffle` module. 
+/// +/// Example: +/// ``` +/// PixelUnshuffle model(PixelUnshuffleOptions(5)); +/// ``` +struct TORCH_API PixelUnshuffleOptions { + /* implicit */ PixelUnshuffleOptions(int64_t downscale_factor) + : downscale_factor_(downscale_factor) {} + + /// Factor to decrease spatial resolution by + TORCH_ARG(int64_t, downscale_factor); +}; + +namespace functional { +/// Options for `torch::nn::functional::pixel_shuffle`. +/// +/// See the documentation for `torch::nn::PixelShuffleOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2)); +/// ``` +using PixelShuffleFuncOptions = PixelShuffleOptions; + +/// Options for `torch::nn::functional::pixel_unshuffle`. +/// +/// See the documentation for `torch::nn::PixelUnshuffleOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pixel_unshuffle(x, F::PixelUnshuffleFuncOptions(2)); +/// ``` +using PixelUnshuffleFuncOptions = PixelUnshuffleOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h new file mode 100644 index 0000000000000000000000000000000000000000..219ae7fd5dd01c8b5e782c53b284667f2e2c6485 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h @@ -0,0 +1,573 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for a `D`-dimensional avgpool module. 
+template +struct AvgPoolOptions { + AvgPoolOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size), stride_(kernel_size) {} + + /// the size of the window to take an average over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the stride of the window. Default value is `kernel_size` + TORCH_ARG(ExpandingArray, stride); + + /// implicit zero padding to be added on both sides + TORCH_ARG(ExpandingArray, padding) = 0; + + /// when True, will use `ceil` instead of `floor` to compute the output shape + TORCH_ARG(bool, ceil_mode) = false; + + /// when True, will include the zero-padding in the averaging calculation + TORCH_ARG(bool, count_include_pad) = true; + + /// if specified, it will be used as divisor, otherwise size of the pooling + /// region will be used. + + TORCH_ARG(c10::optional, divisor_override) = c10::nullopt; +}; + +/// `AvgPoolOptions` specialized for the `AvgPool1d` module. +/// +/// Example: +/// ``` +/// AvgPool1d model(AvgPool1dOptions(3).stride(2)); +/// ``` +using AvgPool1dOptions = AvgPoolOptions<1>; + +/// `AvgPoolOptions` specialized for the `AvgPool2d` module. +/// +/// Example: +/// ``` +/// AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2})); +/// ``` +using AvgPool2dOptions = AvgPoolOptions<2>; + +/// `AvgPoolOptions` specialized for the `AvgPool3d` module. +/// +/// Example: +/// ``` +/// AvgPool3d model(AvgPool3dOptions(5).stride(2)); +/// ``` +using AvgPool3dOptions = AvgPoolOptions<3>; + +namespace functional { +/// Options for `torch::nn::functional::avg_pool1d`. +/// +/// See the documentation for `torch::nn::AvgPool1dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2)); +/// ``` +using AvgPool1dFuncOptions = AvgPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::avg_pool2d`. 
+/// +/// See the documentation for `torch::nn::AvgPool2dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2)); +/// ``` +using AvgPool2dFuncOptions = AvgPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::avg_pool3d`. +/// +/// See the documentation for `torch::nn::AvgPool3dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2)); +/// ``` +using AvgPool3dFuncOptions = AvgPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional maxpool module. +template +struct MaxPoolOptions { + MaxPoolOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size), stride_(kernel_size) {} + + /// the size of the window to take a max over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the stride of the window. Default value is `kernel_size + TORCH_ARG(ExpandingArray, stride); + + /// implicit zero padding to be added on both sides + TORCH_ARG(ExpandingArray, padding) = 0; + + /// a parameter that controls the stride of elements in the window + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// when True, will use `ceil` instead of `floor` to compute the output shape + TORCH_ARG(bool, ceil_mode) = false; +}; + +/// `MaxPoolOptions` specialized for the `MaxPool1d` module. +/// +/// Example: +/// ``` +/// MaxPool1d model(MaxPool1dOptions(3).stride(2)); +/// ``` +using MaxPool1dOptions = MaxPoolOptions<1>; + +/// `MaxPoolOptions` specialized for the `MaxPool2d` module. 
+/// +/// Example: +/// ``` +/// MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2})); +/// ``` +using MaxPool2dOptions = MaxPoolOptions<2>; + +/// `MaxPoolOptions` specialized for the `MaxPool3d` module. +/// +/// Example: +/// ``` +/// MaxPool3d model(MaxPool3dOptions(3).stride(2)); +/// ``` +using MaxPool3dOptions = MaxPoolOptions<3>; + +namespace functional { +/// Options for `torch::nn::functional::max_pool1d` and +/// `torch::nn::functional::max_pool1d_with_indices`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2)); +/// ``` +using MaxPool1dFuncOptions = MaxPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::max_pool2d` and +/// `torch::nn::functional::max_pool2d_with_indices`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2)); +/// ``` +using MaxPool2dFuncOptions = MaxPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::max_pool3d` and +/// `torch::nn::functional::max_pool3d_with_indices`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2)); +/// ``` +using MaxPool3dFuncOptions = MaxPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional adaptive maxpool module. +template +struct AdaptiveMaxPoolOptions { + AdaptiveMaxPoolOptions(output_size_t output_size) + : output_size_(output_size) {} + + /// the target output size + TORCH_ARG(output_size_t, output_size); +}; + +/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool1d` module. 
+/// +/// Example: +/// ``` +/// AdaptiveMaxPool1d model(AdaptiveMaxPool1dOptions(3)); +/// ``` +using AdaptiveMaxPool1dOptions = AdaptiveMaxPoolOptions>; + +/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool2d` module. +/// +/// Example: +/// ``` +/// AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2})); +/// ``` +using AdaptiveMaxPool2dOptions = + AdaptiveMaxPoolOptions>; + +/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool3d` module. +/// +/// Example: +/// ``` +/// AdaptiveMaxPool3d model(AdaptiveMaxPool3dOptions(3)); +/// ``` +using AdaptiveMaxPool3dOptions = + AdaptiveMaxPoolOptions>; + +namespace functional { +/// Options for `torch::nn::functional::adaptive_max_pool1d` and +/// `torch::nn::functional::adaptive_max_pool1d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3)); +/// ``` +using AdaptiveMaxPool1dFuncOptions = AdaptiveMaxPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::adaptive_max_pool2d` and +/// `torch::nn::functional::adaptive_max_pool2d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3)); +/// ``` +using AdaptiveMaxPool2dFuncOptions = AdaptiveMaxPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::adaptive_max_pool3d` and +/// `torch::nn::functional::adaptive_max_pool3d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3)); +/// ``` +using AdaptiveMaxPool3dFuncOptions = AdaptiveMaxPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional adaptive avgpool module. 
+template +struct AdaptiveAvgPoolOptions { + AdaptiveAvgPoolOptions(output_size_t output_size) + : output_size_(output_size) {} + + /// the target output size + TORCH_ARG(output_size_t, output_size); +}; + +/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool1d` module. +/// +/// Example: +/// ``` +/// AdaptiveAvgPool1d model(AdaptiveAvgPool1dOptions(5)); +/// ``` +using AdaptiveAvgPool1dOptions = AdaptiveAvgPoolOptions>; + +/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool2d` module. +/// +/// Example: +/// ``` +/// AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2})); +/// ``` +using AdaptiveAvgPool2dOptions = + AdaptiveAvgPoolOptions>; + +/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool3d` module. +/// +/// Example: +/// ``` +/// AdaptiveAvgPool3d model(AdaptiveAvgPool3dOptions(3)); +/// ``` +using AdaptiveAvgPool3dOptions = + AdaptiveAvgPoolOptions>; + +namespace functional { +/// Options for `torch::nn::functional::adaptive_avg_pool1d`. +/// +/// See the documentation for `torch::nn::AdaptiveAvgPool1dOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3)); +/// ``` +using AdaptiveAvgPool1dFuncOptions = AdaptiveAvgPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::adaptive_avg_pool2d`. +/// +/// See the documentation for `torch::nn::AdaptiveAvgPool2dOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3)); +/// ``` +using AdaptiveAvgPool2dFuncOptions = AdaptiveAvgPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::adaptive_avg_pool3d`. 
+/// +/// See the documentation for `torch::nn::AdaptiveAvgPool3dOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3)); +/// ``` +using AdaptiveAvgPool3dFuncOptions = AdaptiveAvgPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional maxunpool module. +template +struct MaxUnpoolOptions { + MaxUnpoolOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size), stride_(kernel_size) {} + + /// the size of the window to take a max over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the stride of the window. Default value is `kernel_size + TORCH_ARG(ExpandingArray, stride); + + /// implicit zero padding to be added on both sides + TORCH_ARG(ExpandingArray, padding) = 0; +}; + +/// `MaxUnpoolOptions` specialized for the `MaxUnpool1d` module. +/// +/// Example: +/// ``` +/// MaxUnpool1d model(MaxUnpool1dOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool1dOptions = MaxUnpoolOptions<1>; + +/// `MaxUnpoolOptions` specialized for the `MaxUnpool2d` module. +/// +/// Example: +/// ``` +/// MaxUnpool2d model(MaxUnpool2dOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool2dOptions = MaxUnpoolOptions<2>; + +/// `MaxUnpoolOptions` specialized for the `MaxUnpool3d` module. +/// +/// Example: +/// ``` +/// MaxUnpool3d model(MaxUnpool3dOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool3dOptions = MaxUnpoolOptions<3>; + +// ============================================================================ + +namespace functional { + +/// Options for a `D`-dimensional maxunpool functional. 
+template +struct MaxUnpoolFuncOptions { + MaxUnpoolFuncOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size), stride_(kernel_size) {} + + /// the size of the window to take a max over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the stride of the window. Default value is `kernel_size + TORCH_ARG(ExpandingArray, stride); + + /// implicit zero padding to be added on both sides + TORCH_ARG(ExpandingArray, padding) = 0; + + /// the targeted output size + TORCH_ARG(c10::optional>, output_size) = c10::nullopt; +}; + +/// `MaxUnpoolFuncOptions` specialized for +/// `torch::nn::functional::max_unpool1d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool1d(x, indices, +/// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool1dFuncOptions = MaxUnpoolFuncOptions<1>; + +/// `MaxUnpoolFuncOptions` specialized for +/// `torch::nn::functional::max_unpool2d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool2d(x, indices, +/// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool2dFuncOptions = MaxUnpoolFuncOptions<2>; + +/// `MaxUnpoolFuncOptions` specialized for +/// `torch::nn::functional::max_unpool3d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3)); +/// ``` +using MaxUnpool3dFuncOptions = MaxUnpoolFuncOptions<3>; + +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional fractional maxpool module. 
+template +struct FractionalMaxPoolOptions { + FractionalMaxPoolOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size) {} + + /// the size of the window to take a max over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the target output size of the image + TORCH_ARG(c10::optional>, output_size) = c10::nullopt; + + /// If one wants to have an output size as a ratio of the input size, this + /// option can be given. This has to be a number or tuple in the range (0, 1) + using ExpandingArrayDouble = torch::ExpandingArray; + TORCH_ARG(c10::optional, output_ratio) = c10::nullopt; + + TORCH_ARG(torch::Tensor, _random_samples) = Tensor(); +}; + +/// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool2d` module. +/// +/// Example: +/// ``` +/// FractionalMaxPool2d model(FractionalMaxPool2dOptions(5).output_size(1)); +/// ``` +using FractionalMaxPool2dOptions = FractionalMaxPoolOptions<2>; + +/// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool3d` module. 
+/// +/// Example: +/// ``` +/// FractionalMaxPool3d model(FractionalMaxPool3dOptions(5).output_size(1)); +/// ``` +using FractionalMaxPool3dOptions = FractionalMaxPoolOptions<3>; + +namespace functional { +/// Options for `torch::nn::functional::fractional_max_pool2d` and +/// `torch::nn::functional::fractional_max_pool2d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool2d(x, +/// F::FractionalMaxPool2dFuncOptions(3).output_size(2)); +/// ``` +using FractionalMaxPool2dFuncOptions = FractionalMaxPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::fractional_max_pool3d` and +/// `torch::nn::functional::fractional_max_pool3d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool3d(x, +/// F::FractionalMaxPool3dFuncOptions(3).output_size(2)); +/// ``` +using FractionalMaxPool3dFuncOptions = FractionalMaxPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional lppool module. +template +struct LPPoolOptions { + LPPoolOptions(double norm_type, ExpandingArray kernel_size) + : norm_type_(norm_type), + kernel_size_(kernel_size), + stride_(kernel_size) {} + + TORCH_ARG(double, norm_type); + + // the size of the window to take an average over + TORCH_ARG(ExpandingArray, kernel_size); + + // the stride of the window. Default value is `kernel_size` + TORCH_ARG(ExpandingArray, stride); + + // when True, will use `ceil` instead of `floor` to compute the output shape + TORCH_ARG(bool, ceil_mode) = false; +}; + +/// `LPPoolOptions` specialized for the `LPPool1d` module. +/// +/// Example: +/// ``` +/// LPPool1d model(LPPool1dOptions(1, 2).stride(5).ceil_mode(true)); +/// ``` +using LPPool1dOptions = LPPoolOptions<1>; + +/// `LPPoolOptions` specialized for the `LPPool2d` module. 
+/// +/// Example: +/// ``` +/// LPPool2d model(LPPool2dOptions(1, std::vector({3, 4})).stride({5, +/// 6}).ceil_mode(true)); +/// ``` +using LPPool2dOptions = LPPoolOptions<2>; + +namespace functional { +/// Options for `torch::nn::functional::lp_pool1d`. +/// +/// See the documentation for `torch::nn::LPPool1dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2)); +/// ``` +using LPPool1dFuncOptions = LPPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::lp_pool2d`. +/// +/// See the documentation for `torch::nn::LPPool2dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2)); +/// ``` +using LPPool2dFuncOptions = LPPool2dOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h new file mode 100644 index 0000000000000000000000000000000000000000..133acc500276d2c0577e863f5bdad4ca6528cc14 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h @@ -0,0 +1,236 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +namespace detail { + +/// Common options for RNN, LSTM and GRU modules. 
+struct TORCH_API RNNOptionsBase { + typedef std::variant< + enumtype::kLSTM, + enumtype::kGRU, + enumtype::kRNN_TANH, + enumtype::kRNN_RELU> + rnn_options_base_mode_t; + + RNNOptionsBase( + rnn_options_base_mode_t mode, + int64_t input_size, + int64_t hidden_size); + + TORCH_ARG(rnn_options_base_mode_t, mode); + /// The number of features of a single sample in the input sequence `x`. + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h`. + TORCH_ARG(int64_t, hidden_size); + /// The number of recurrent layers (cells) to use. + TORCH_ARG(int64_t, num_layers) = 1; + /// Whether a bias term should be added to all linear operations. + TORCH_ARG(bool, bias) = true; + /// If true, the input sequence should be provided as `(batch, sequence, + /// features)`. If false (default), the expected layout is `(sequence, batch, + /// features)`. + TORCH_ARG(bool, batch_first) = false; + /// If non-zero, adds dropout with the given probability to the output of each + /// RNN layer, except the final layer. + TORCH_ARG(double, dropout) = 0.0; + /// Whether to make the RNN bidirectional. + TORCH_ARG(bool, bidirectional) = false; + /// Cell projection dimension. If 0, projections are not added. Can only be + /// used for LSTMs. + TORCH_ARG(int64_t, proj_size) = 0; +}; + +} // namespace detail + +/// Options for the `RNN` module. +/// +/// Example: +/// ``` +/// RNN model(RNNOptions(128, +/// 64).num_layers(3).dropout(0.2).nonlinearity(torch::kTanh)); +/// ``` +struct TORCH_API RNNOptions { + typedef std::variant nonlinearity_t; + + RNNOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// Number of recurrent layers. 
E.g., setting ``num_layers=2`` + /// would mean stacking two RNNs together to form a `stacked RNN`, + /// with the second RNN taking in outputs of the first RNN and + /// computing the final results. Default: 1 + TORCH_ARG(int64_t, num_layers) = 1; + /// The non-linearity to use. Can be either ``torch::kTanh`` or + /// ``torch::kReLU``. Default: ``torch::kTanh`` + TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh; + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; + /// If ``true``, then the input and output tensors are provided + /// as `(batch, seq, feature)`. Default: ``false`` + TORCH_ARG(bool, batch_first) = false; + /// If non-zero, introduces a `Dropout` layer on the outputs of each + /// RNN layer except the last layer, with dropout probability equal to + /// `dropout`. Default: 0 + TORCH_ARG(double, dropout) = 0.0; + /// If ``true``, becomes a bidirectional RNN. Default: ``false`` + TORCH_ARG(bool, bidirectional) = false; +}; + +/// Options for the `LSTM` module. +/// +/// Example: +/// ``` +/// LSTM model(LSTMOptions(2, +/// 4).num_layers(3).batch_first(false).bidirectional(true)); +/// ``` +struct TORCH_API LSTMOptions { + LSTMOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// Number of recurrent layers. E.g., setting ``num_layers=2`` + /// would mean stacking two LSTMs together to form a `stacked LSTM`, + /// with the second LSTM taking in outputs of the first LSTM and + /// computing the final results. Default: 1 + TORCH_ARG(int64_t, num_layers) = 1; + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; + /// If ``true``, then the input and output tensors are provided + /// as (batch, seq, feature). 
Default: ``false`` + TORCH_ARG(bool, batch_first) = false; + /// If non-zero, introduces a `Dropout` layer on the outputs of each + /// LSTM layer except the last layer, with dropout probability equal to + /// `dropout`. Default: 0 + TORCH_ARG(double, dropout) = 0.0; + /// If ``true``, becomes a bidirectional LSTM. Default: ``false`` + TORCH_ARG(bool, bidirectional) = false; + /// Cell projection dimension. If 0, projections are not added + TORCH_ARG(int64_t, proj_size) = 0; +}; + +/// Options for the `GRU` module. +/// +/// Example: +/// ``` +/// GRU model(GRUOptions(2, +/// 4).num_layers(3).batch_first(false).bidirectional(true)); +/// ``` +struct TORCH_API GRUOptions { + GRUOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// Number of recurrent layers. E.g., setting ``num_layers=2`` + /// would mean stacking two GRUs together to form a `stacked GRU`, + /// with the second GRU taking in outputs of the first GRU and + /// computing the final results. Default: 1 + TORCH_ARG(int64_t, num_layers) = 1; + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; + /// If ``true``, then the input and output tensors are provided + /// as (batch, seq, feature). Default: ``false`` + TORCH_ARG(bool, batch_first) = false; + /// If non-zero, introduces a `Dropout` layer on the outputs of each + /// GRU layer except the last layer, with dropout probability equal to + /// `dropout`. Default: 0 + TORCH_ARG(double, dropout) = 0.0; + /// If ``true``, becomes a bidirectional GRU. 
Default: ``false`` + TORCH_ARG(bool, bidirectional) = false; +}; + +namespace detail { + +/// Common options for RNNCell, LSTMCell and GRUCell modules +struct TORCH_API RNNCellOptionsBase { + RNNCellOptionsBase( + int64_t input_size, + int64_t hidden_size, + bool bias, + int64_t num_chunks); + TORCH_ARG(int64_t, input_size); + TORCH_ARG(int64_t, hidden_size); + TORCH_ARG(bool, bias); + TORCH_ARG(int64_t, num_chunks); +}; + +} // namespace detail + +/// Options for the `RNNCell` module. +/// +/// Example: +/// ``` +/// RNNCell model(RNNCellOptions(20, +/// 10).bias(false).nonlinearity(torch::kReLU)); +/// ``` +struct TORCH_API RNNCellOptions { + typedef std::variant nonlinearity_t; + + RNNCellOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; + /// The non-linearity to use. Can be either ``torch::kTanh`` or + /// ``torch::kReLU``. Default: ``torch::kTanh`` + TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh; +}; + +/// Options for the `LSTMCell` module. +/// +/// Example: +/// ``` +/// LSTMCell model(LSTMCellOptions(20, 10).bias(false)); +/// ``` +struct TORCH_API LSTMCellOptions { + LSTMCellOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; +}; + +/// Options for the `GRUCell` module. 
+/// +/// Example: +/// ``` +/// GRUCell model(GRUCellOptions(20, 10).bias(false)); +/// ``` +struct TORCH_API GRUCellOptions { + GRUCellOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; +}; + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h new file mode 100644 index 0000000000000000000000000000000000000000..41db38fe0757a72081b0125f8747bc0b65c16c85 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Transformer` module +/// +/// Example: +/// ``` +/// TransformerOptions options; +/// TransformerOptions options(16, 4); +/// auto options = TransformerOptions().d_model(4).nhead(2).dropout(0.0); +/// ``` +struct TORCH_API TransformerOptions { + // The following constructors are commonly used + // Please don't add more unless it is proved as a common usage + TransformerOptions() = default; + TransformerOptions(int64_t d_model, int64_t nhead); + TransformerOptions( + int64_t d_model, + int64_t nhead, + int64_t num_encoder_layers, + int64_t num_decoder_layers); + + /// the number of expected features in the encoder/decoder inputs + /// (default=512) + TORCH_ARG(int64_t, d_model) = 512; + + /// the number of heads in the multiheadattention models (default=8) + TORCH_ARG(int64_t, nhead) = 8; + + /// the number of 
sub-encoder-layers in the encoder (default=6) + TORCH_ARG(int64_t, num_encoder_layers) = 6; + + /// the number of sub-decoder-layers in the decoder (default=6) + TORCH_ARG(int64_t, num_decoder_layers) = 6; + + /// the dimension of the feedforward network model (default=2048) + TORCH_ARG(int64_t, dim_feedforward) = 2048; + + /// the dropout value (default=0.1) + TORCH_ARG(double, dropout) = 0.1; + + /// the activation function of encoder/decoder intermediate layer + /// (default=``torch::kReLU``) + TORCH_ARG(activation_t, activation) = torch::kReLU; + + /// custom encoder (default=None) + TORCH_ARG(AnyModule, custom_encoder); + + /// custom decoder (default=None) + TORCH_ARG(AnyModule, custom_decoder); +}; + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformercoder.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformercoder.h new file mode 100644 index 0000000000000000000000000000000000000000..64f6b998f4c657d1ca45a0123dbba379effd0979 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformercoder.h @@ -0,0 +1,76 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `TransformerEncoder` +/// +/// Example: +/// ``` +/// TransformerEncoderLayer encoderLayer(TransformerEncoderLayerOptions(512, +/// 8).dropout(0.1)); auto options = TransformerEncoderOptions(encoderLayer, +/// 6).norm(LayerNorm(LayerNormOptions({2}))); +/// ``` +struct TORCH_API TransformerEncoderOptions { + // This constructor will keep a shallow copy of encoder_layer, so it keeps all + // the data in encoder_layer. 
+ TransformerEncoderOptions( + TransformerEncoderLayer encoder_layer, + int64_t num_layers); + // This constructor will create a new TransformerEncoderLayer obj based on + // passed in encoder_layer_options. + TransformerEncoderOptions( + const TransformerEncoderLayerOptions& encoder_layer_options, + int64_t num_layers); + + /// transformer Encoder Layer + TORCH_ARG(TransformerEncoderLayer, encoder_layer) = nullptr; + + /// number of encoder layers + TORCH_ARG(int64_t, num_layers); + + /// normalization module + TORCH_ARG(AnyModule, norm); +}; + +/// Options for the `TransformerDecoder` module. +/// +/// Example: +/// ``` +/// TransformerDecoderLayer decoder_layer(TransformerDecoderLayerOptions(512, +/// 8).dropout(0.1)); auto options = TransformerDecoderOptions(decoder_layer, +/// 6)norm(LayerNorm(LayerNormOptions({2}))); TransformerDecoder +/// transformer_decoder(options); +/// ``` +struct TORCH_API TransformerDecoderOptions { + // This constructor will keep the a ref of passed in decoder_layer, + // so it keeps all the data in decoder_layer. + TransformerDecoderOptions( + TransformerDecoderLayer decoder_layer, + int64_t num_layers); + // This constructor will create a new TransformerDecoderLayer obj, + // based on passed in decoder_layer_options. 
+ TransformerDecoderOptions( + const TransformerDecoderLayerOptions& decoder_layer_options, + int64_t num_layers); + + /// decoder layer to be cloned + TORCH_ARG(TransformerDecoderLayer, decoder_layer) = nullptr; + + /// number of decoder layers + TORCH_ARG(int64_t, num_layers); + + /// normalization module + TORCH_ARG(AnyModule, norm); +}; + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformerlayer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformerlayer.h new file mode 100644 index 0000000000000000000000000000000000000000..cbd6af26a1da696ef118caa70b77d9c17a969dfd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformerlayer.h @@ -0,0 +1,72 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +using activation_t = std::variant< + enumtype::kReLU, + enumtype::kGELU, + std::function>; + +/// Options for the `TransformerEncoderLayer` +/// +/// Example: +/// ``` +/// auto options = TransformerEncoderLayer(512, 8).dropout(0.2); +/// ``` +struct TORCH_API TransformerEncoderLayerOptions { + /* implicit */ TransformerEncoderLayerOptions(int64_t d_model, int64_t nhead); + + /// the number of expected features in the input + TORCH_ARG(int64_t, d_model); + + /// the number of heads in the multiheadattention models + TORCH_ARG(int64_t, nhead); + + /// the dimension of the feedforward network model, default is 2048 + TORCH_ARG(int64_t, dim_feedforward) = 2048; + + /// the dropout value, default is 0.1 + TORCH_ARG(double, dropout) = 0.1; + + /// the activation function of intermediate layer, can be ``torch::kReLU``, + /// ``torch::GELU``, or a unary callable. 
Default: ``torch::kReLU`` + TORCH_ARG(activation_t, activation) = torch::kReLU; +}; + +// ============================================================================ + +/// Options for the `TransformerDecoderLayer` module. +/// +/// Example: +/// ``` +/// TransformerDecoderLayer model(TransformerDecoderLayerOptions(512, +/// 8).dropout(0.2)); +/// ``` +struct TORCH_API TransformerDecoderLayerOptions { + TransformerDecoderLayerOptions(int64_t d_model, int64_t nhead); + + /// number of expected features in the input + TORCH_ARG(int64_t, d_model); + + /// number of heads in the multiheadattention models + TORCH_ARG(int64_t, nhead); + + /// dimension of the feedforward network model. Default: 2048 + TORCH_ARG(int64_t, dim_feedforward) = 2048; + + /// dropout value. Default: 1 + TORCH_ARG(double, dropout) = 0.1; + + /// activation function of intermediate layer, can be ``torch::kGELU``, + /// ``torch::kReLU``, or a unary callable. Default: ``torch::kReLU`` + TORCH_ARG(activation_t, activation) = torch::kReLU; +}; + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h new file mode 100644 index 0000000000000000000000000000000000000000..d03e5f2345f320624fca623109fe21f095a54983 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h @@ -0,0 +1,110 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace nn { + +/// Options for the `Upsample` module. +/// +/// Example: +/// ``` +/// Upsample +/// model(UpsampleOptions().scale_factor(std::vector({3})).mode(torch::kLinear).align_corners(false)); +/// ``` +struct TORCH_API UpsampleOptions { + /// output spatial sizes. 
+ TORCH_ARG(c10::optional>, size) = c10::nullopt; + + /// multiplier for spatial size. + TORCH_ARG(c10::optional>, scale_factor) = c10::nullopt; + + /// the upsampling algorithm: one of "nearest", "linear", "bilinear", + /// "bicubic" and "trilinear". Default: "nearest" + typedef std::variant< + enumtype::kNearest, + enumtype::kLinear, + enumtype::kBilinear, + enumtype::kBicubic, + enumtype::kTrilinear> + mode_t; + TORCH_ARG(mode_t, mode) = torch::kNearest; + + /// if "True", the corner pixels of the input and output tensors are + /// aligned, and thus preserving the values at those pixels. This only has + /// effect when :attr:`mode` is "linear", "bilinear", "bicubic", or + /// "trilinear". Default: "False" + TORCH_ARG(c10::optional, align_corners) = c10::nullopt; +}; + +namespace functional { + +/// Options for `torch::nn::functional::interpolate`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::interpolate(input, +/// F::InterpolateFuncOptions().size(std::vector({4})).mode(torch::kNearest)); +/// ``` +struct TORCH_API InterpolateFuncOptions { + typedef std::variant< + enumtype::kNearest, + enumtype::kLinear, + enumtype::kBilinear, + enumtype::kBicubic, + enumtype::kTrilinear, + enumtype::kArea, + enumtype::kNearestExact> + mode_t; + + /// output spatial sizes. + TORCH_ARG(c10::optional>, size) = c10::nullopt; + + /// multiplier for spatial size. + TORCH_ARG(c10::optional>, scale_factor) = c10::nullopt; + + /// the upsampling algorithm: one of "nearest", "linear", "bilinear", + /// "bicubic", "trilinear", "area", "nearest-exact". Default: "nearest" + TORCH_ARG(mode_t, mode) = torch::kNearest; + + /// Geometrically, we consider the pixels of the input and output as squares + /// rather than points. If set to "True", the input and output tensors are + /// aligned by the center points of their corner pixels, preserving the values + /// at the corner pixels. 
If set to "False", the input and output tensors + /// are aligned by the corner points of their corner pixels, and the + /// interpolation uses edge value padding for out-of-boundary values, making + /// this operation *independent* of input size when :attr:`scale_factor` is + /// kept the same. This only has an effect when :attr:`mode` is "linear", + /// "bilinear", "bicubic" or "trilinear". Default: "False" + TORCH_ARG(c10::optional, align_corners) = c10::nullopt; + + /// recompute the scale_factor for use in the + /// interpolation calculation. When `scale_factor` is passed as a parameter, + /// it is used to compute the `output_size`. If `recompute_scale_factor` is + /// `true` or not specified, a new `scale_factor` will be computed based on + /// the output and input sizes for use in the interpolation computation (i.e. + /// the computation will be identical to if the computed `output_size` were + /// passed-in explicitly). Otherwise, the passed-in `scale_factor` will be + /// used in the interpolation computation. Note that when `scale_factor` is + /// floating-point, the recomputed scale_factor may differ from the one passed + /// in due to rounding and precision issues. + TORCH_ARG(c10::optional, recompute_scale_factor) = c10::nullopt; + + /// flag to apply anti-aliasing. Using anti-alias + /// option together with :attr:`align_corners` equals "False", interpolation + /// result would match Pillow result for downsampling operation. Supported + /// modes: "bilinear". Default: "False". 
+ TORCH_ARG(bool, antialias) = false; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/vision.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/vision.h new file mode 100644 index 0000000000000000000000000000000000000000..814f4b6684d96190e88b72d53a24eceac6eb5b0e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/vision.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +/// Options for `torch::nn::functional::grid_sample`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::grid_sample(input, grid, +/// F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true)); +/// ``` +struct TORCH_API GridSampleFuncOptions { + typedef std::variant mode_t; + typedef std:: + variant + padding_mode_t; + + /// interpolation mode to calculate output values. Default: Bilinear + TORCH_ARG(mode_t, mode) = torch::kBilinear; + /// padding mode for outside grid values. Default: Zeros + TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros; + /// Specifies perspective to pixel as point. 
Default: false + TORCH_ARG(c10::optional, align_corners) = c10::nullopt; +}; + +} // namespace functional +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/parallel/data_parallel.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/parallel/data_parallel.h new file mode 100644 index 0000000000000000000000000000000000000000..46bf2ac6953e7b5ba5bdbd93fe63d7c91db5c6e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/parallel/data_parallel.h @@ -0,0 +1,297 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +namespace { + +// Note [Replicating Modules] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Module replication is implemented in the following two steps: +// 1) create a module replica on each destination device using Module.clone(). +// 2) manually add a gradient edge pointing from every parameter X in every +// module replica to the same parameter X in the original module, using +// ReduceAdd as the grad_fn. +// +// ReduceAdd can ONLY be used during the backward pass of data parallel. Forward +// pass cannot use this function as it does not setup gradient function and +// history at all. Do NOT try to use ReduceAdd for any other purposes. +// +// NB: An alternative is to add Broadcast and ReduceAddCoalesce to +// torch/csrc/autograd/functions/comm.cpp as normal autograd functions, +// implement a Replicatable (like cloneable) class and add it as a friend class +// in Module.h. In the forward pass, the Replicatable could use the Broadcast +// function to replicate every module parameter and set gradient functions using +// ReduceAddCoalesce (like how it is implemented in Python). 
However, unlike in +// Python, where changes to Linear._parameters["weight"] would also apply to +// Linear.weight (using Linear as an example), Linear.weight and +// Linear.parameters_["weight"] are two tensor objects pointing to the same +// TensorImpl. Assigning a new tensor to Linear.parameters_["weight"] will not +// change Linear.weight. To make this work, we will have to: +// 1) force every module to also inherit from Replicatable +// 2) force every module to implement an additional function, e.g., +// Replicatable::load_params(), to pick up changes from parameters_ to their +// own member fields. +// This will be an overkill as Replicatable will only be used in data_parallel, +// not even ddp. + +// Autograd function for the replicate step in data parallel. This is only used +// in data parallel, and should not be exposed as a user API. +struct ReduceAdd : public autograd::Node { + explicit ReduceAdd(const at::Device& destination_device) + : destination_device_(destination_device){}; + ~ReduceAdd() override {} + + autograd::variable_list apply(autograd::variable_list&& inputs) override { + TORCH_CHECK( + !torch::autograd::compute_requires_grad(inputs), + "ReduceAdd can only be used during the backward pass of data parallel."); + + Tensor output = torch::zeros_like(inputs[0], {destination_device_}); + + for (auto& input : inputs) { + TORCH_CHECK( + input.sizes() == inputs[0].sizes(), + "All inputs of ReduceAdd must have the same size, but got ", + input.sizes(), + " and ", + inputs[0].sizes()); + + TORCH_CHECK( + input.dtype() == inputs[0].dtype(), + "All inputs of ReduceAdd must have the same dtype, but got ", + input.dtype(), + " and ", + inputs[0].dtype()); + + // TODO: use nccl reduce + output.add_(input.to(destination_device_)); + } + + return {output}; + } + + private: + at::Device destination_device_; +}; + +} // namespace + +// A friend function to Module, it recursively sets gradient edges pointing from +// every parameter X in every module replica 
to the same parameter X in the +// original module. See [Replicating Modules] +template +void replicate_grad_edges( + const std::shared_ptr& module, + const std::vector>& replicas, + const std::vector& devices) { + for (auto& parameter : module->named_parameters(/*recurse=*/false)) { + auto grad_fn = std::make_shared((*parameter).device()); + grad_fn->set_next_edges(autograd::collect_next_edges(*parameter)); + + for (const auto i : c10::irange(devices.size())) { + autograd::set_history(replicas[i]->parameters_[parameter.key()], grad_fn); + } + } + + for (auto& buffer : module->named_buffers(/*recurse=*/false)) { + if (buffer.value().requires_grad()) { + auto grad_fn = std::make_shared((*buffer).device()); + grad_fn->set_next_edges(autograd::collect_next_edges(*buffer)); + + for (const auto i : c10::irange(devices.size())) { + autograd::set_history(replicas[i]->buffers_[buffer.key()], grad_fn); + } + } + } + + for (auto& child : module->children_) { + std::vector> child_replicas; + child_replicas.reserve(devices.size()); + for (auto& replica : replicas) { + child_replicas.push_back(replica->children_[child.key()]); + } + + // recursively set gradient edges for all children + replicate_grad_edges(*child, child_replicas, devices); + } +} + +namespace parallel { + +/// Replicates a module on the given list of devices. +/// A replica is created by calling `clone()` on the module. For this, the +/// module must inherit from `nn::Cloneable`, or define its own `clone()` +/// method, which is expected to perform a deep copy of the module. +template +std::vector> replicate( + const std::shared_ptr& module, + const std::vector& devices) { + std::vector> replicas; + replicas.reserve(devices.size()); + for (const auto& device : devices) { + replicas.push_back( + std::dynamic_pointer_cast(module->clone(device))); + } + // Configure gradient edges to point from replcia parameters to original + // module parameters. 
See [Replicating Modules] + replicate_grad_edges(module, replicas, devices); + return replicas; +} + +/// Replicates a module holder on the given list of devices. +/// This method allows calling `replicate()` with a module holder, such as +/// `Linear`. +template +std::vector> replicate( + const ModuleHolder& module, + const std::vector& devices) { + auto ptrs = replicate(module.ptr(), devices); + return std::vector>(ptrs.begin(), ptrs.end()); +} + +/// Applies the given inputs to the given modules in a parallel fashion. +/// Conceptually, a thread is spawned for each `(module, input)` pair, in which +/// `forward()` is called on the module with its corresponding input. The +/// outputs of the individual calls are stored in a vector and returned. +/// +/// The first exception caught by any thread is stashed and rethrown after all +/// threads have completed their operation. +/// +/// Further remarks: +/// 1. The length of the module container must match the length of the inputs. +/// 2. If a list of devices is supplied, it must match the list of modules in +/// length. Each device will be set to the current default device during the +/// invocation of the respective module. This means any tensors allocated on the +/// default device inside the module will be constructed on this device. +template +std::vector parallel_apply( + std::vector& modules, + const std::vector& inputs, + const optional>& devices = nullopt) { + TORCH_CHECK( + modules.size() == inputs.size(), "Must have as many inputs as modules"); + if (devices) { + TORCH_CHECK( + modules.size() == devices->size(), + "Must have as many devices as modules"); + } + + std::vector outputs(modules.size()); + std::mutex mutex; + + // std::exception_ptr can be passed between threads: + // > An instance of std::exception_ptr may be passed to another function, + // > possibly on another thread, where the exception may be rethrown [...]. 
+ // https://en.cppreference.com/w/cpp/error/exception_ptr + std::exception_ptr exception; + + at::parallel_for( + /*begin=*/0, + /*end=*/modules.size(), + /*grain_size=*/1, + [&modules, &inputs, &devices, &outputs, &mutex, &exception]( + int64_t index, int64_t stop) { + for (; index < stop; ++index) { + try { + auto output = modules[index]->forward(inputs[index]); + output = + output.to(devices ? (*devices)[index] : inputs[index].device()); + std::lock_guard lock(mutex); + outputs[index] = output; + } catch (...) { + std::lock_guard lock(mutex); + if (!exception) { + exception = std::current_exception(); + } + } + } + }); + + if (exception) { + std::rethrow_exception(exception); + } + + return outputs; +} + +/// Evaluates `module(input)` in parallel across the given `devices`. If +/// `devices` is not supplied, the invocation is parallelized across all +/// available CUDA devices. If `output_device` is supplied, the final, combined +/// tensor will be placed on this device. If not, it defaults to the first +/// device in `devices`. +/// +/// In detail, this method performs the following four distinct steps: +/// 1. *Scatter* the input to the given devices, +/// 2. *Replicate* (deep clone) the model on each device, +/// 3. *Evaluate* each module with its input on its device, +/// 4. *Gather* the outputs of each replica into a single output tensor, located +/// on the `output_device`. 
+template +Tensor data_parallel( + ModuleType module, + Tensor input, + optional> devices = nullopt, + optional output_device = nullopt, + int64_t dim = 0) { + if (!devices) { + const auto device_count = torch::cuda::device_count(); + TORCH_CHECK( + device_count > 0, "Expected at least one CUDA device to be available"); + devices = std::vector(); + devices->reserve(device_count); + for (const auto index : c10::irange(device_count)) { + devices->emplace_back(kCUDA, static_cast(index)); + } + } + if (!output_device) { + output_device = devices->front(); + } + + if (devices->size() == 1) { + module->to(devices->front()); + input = input.to(devices->front()); + return module->forward(std::move(input)).to(*output_device); + } + + autograd::Scatter scatter(*devices, /*chunk_sizes=*/nullopt, dim); + auto scattered_inputs = fmap(scatter.apply({std::move(input)})); + // Input tensor might not be big enough to scale across all available devices + if (scattered_inputs.size() < devices->size()) { + devices->resize( + scattered_inputs.size(), + Device(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)); + } + + auto replicas = replicate(module, *devices); + auto outputs = parallel_apply(replicas, scattered_inputs, *devices); + return autograd::Gather(*output_device, dim) + .apply(fmap(std::move(outputs))) + .front(); +} + +} // namespace parallel +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..b38e6cf2c0ff729485cf4a27a1ae49818d06c807 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl-inl.h @@ -0,0 +1,74 @@ +// This class exists only to do SFINAE on abstract types `T` that are really +// `ModuleHolder`, because there's no good way to say that `T` is a +// 
`ModuleHolder` over some unknown type `ModuleType`. With this, you can do +// `enable_if_t>`. +struct ModuleHolderIndicator {}; + +// A type trait that is true for types that are `ModuleHolder`s. +template +using is_module_holder = std::is_base_of>; + +template +using disable_if_module_holder_t = disable_if_t::value>; + +// A collection of templates that answer the question whether a type `T` is a +// `ModuleHolder`, and if so whether its contained type is of type `C`. This is +// tricky because it is hard to short circuit in template metaprogramming. A +// naive and incorrect solution to this problem would be something like +// `disable_if::value && typename T::ContainedType == C>`. +// This would disable all types that are not `ModuleHolder`s, because even +// though the `is_module_holder::value` may be `false` for such types the +// `T::ContainedType` access would be ill-formed and thus fail the whole +// expression by the rules of SFINAE. Instead we have to use template +// specialization to statically branch on the first condition +// (`is_module_holder`) and are only then allowed to query +// `T::ContainedType` in the branch for which the condition was true. + +// Base template. +template +struct is_module_holder_of_impl; + +// False branch. `T` is not a `ModuleHolder` and thus not a `ModuleHolder` with +// contained type `C`. +template +struct is_module_holder_of_impl : std::false_type {}; + +// True branch. `T` is a `ModuleHolder` and thus we can legit access its +// `ContainedType` and compare it against `C`. +template +struct is_module_holder_of_impl + : std::is_same {}; + +// Helper template. +template +struct is_module_holder_of : is_module_holder_of_impl< + is_module_holder::value, + decay_t, + decay_t> {}; + +// A collection of templates that allow deducing the return type of the +// `forward()` method, but only if a module actually has a `forward()` method, +// and otherwise deduces to the type `void`. 
+ +template +struct return_type_of_forward_impl; + +template +struct return_type_of_forward_impl { + using type = decltype(::std::declval().forward(::std::declval()...)); +}; + +template +struct return_type_of_forward_impl { + using type = void; +}; + +template +using return_type_of_forward = return_type_of_forward_impl< + torch::detail::has_forward::value, + C, + Args...>; + +template +using return_type_of_forward_t = + typename return_type_of_forward::type; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl.h new file mode 100644 index 0000000000000000000000000000000000000000..d66d83c257ebd0061b5fa59e1299dd16ff9badb8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/pimpl.h @@ -0,0 +1,214 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include + +namespace torch { +namespace detail { +// Dump all the template metaprogramming in this file. +#include +} // namespace detail + +namespace nn { + +/// A `ModuleHolder` is essentially a wrapper around `std::shared_ptr` where +/// `M` is an `nn::Module` subclass, with convenient constructors defined for +/// the kind of constructions we want to allow for our modules. +template +class ModuleHolder : torch::detail::ModuleHolderIndicator { + protected: + /// The module pointer this class wraps. + /// NOTE: Must be placed at the top of the class so that we can use it with + /// trailing return types below. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::shared_ptr impl_; + + public: + using ContainedType = Contained; + + /// Default constructs the contained module if if has a default constructor, + /// else produces a static error. 
+ /// + /// NOTE: This uses the behavior of template + /// classes in C++ that constructors (or any methods) are only compiled when + /// actually used. + ModuleHolder() : impl_(default_construct()) { + static_assert( + std::is_default_constructible::value, + "You are trying to default construct a module which has " + "no default constructor. Use = nullptr to give it the empty state " + "(e.g. `Linear linear = nullptr;` instead of `Linear linear;`)."); + } + + /// Constructs the `ModuleHolder` with an empty contained value. Access to + /// the underlying module is not permitted and will throw an exception, until + /// a value is assigned. + /* implicit */ ModuleHolder(std::nullptr_t) : impl_(nullptr) {} + + /// Constructs the `ModuleHolder` with a contained module, forwarding all + /// arguments to its constructor. + template < + typename Head, + typename... Tail, + typename = typename std::enable_if< + !(torch::detail::is_module_holder_of::value && + (sizeof...(Tail) == 0))>::type> + explicit ModuleHolder(Head&& head, Tail&&... tail) + : impl_(new Contained( + std::forward(head), + std::forward(tail)...)) {} + + /// Constructs the `ModuleHolder` from a pointer to the contained type. + /// Example: `Linear(std::make_shared(...))`. + /* implicit */ ModuleHolder(std::shared_ptr module) + : impl_(std::move(module)) {} + + /// Returns true if the `ModuleHolder` contains a module, or false if it is + /// `nullptr`. + explicit operator bool() const noexcept { + return !is_empty(); + } + + /// Forwards to the contained module. + Contained* operator->() { + return get(); + } + + /// Forwards to the contained module. + const Contained* operator->() const { + return get(); + } + + /// Returns a reference to the contained module. + Contained& operator*() { + return *get(); + } + + /// Returns a const reference to the contained module. + const Contained& operator*() const { + return *get(); + } + + /// Returns a shared pointer to the underlying module. 
+ const std::shared_ptr& ptr() const { + TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder"); + return impl_; + } + + /// Returns a pointer to the underlying module. + Contained* get() { + TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder"); + return impl_.get(); + } + + /// Returns a const pointer to the underlying module. + const Contained* get() const { + TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder"); + return impl_.get(); + } + + /// Calls the `forward()` method of the contained module. + template + auto operator()(Args&&... args) + -> torch::detail::return_type_of_forward_t { + // This will not compile if the module does not have a `forward()` method + // (as expected). + // NOTE: `std::forward` is qualified to prevent VS2017 emitting + // error C2872: 'std': ambiguous symbol + return impl_->forward(::std::forward(args)...); + } + + /// Forwards to the subscript operator of the contained module. + /// NOTE: std::forward is qualified to prevent VS2017 emitting + /// error C2872: 'std': ambiguous symbol + template + decltype(auto) operator[](Arg&& arg) { + return (*impl_)[::std::forward(arg)]; + } + + /// Returns true if the `ModuleHolder` does not contain a module. + bool is_empty() const noexcept { + return impl_ == nullptr; + } + + private: + /// In C++17, the two methods below could be written as the following: + /// if constexpr (std::is_default_constructible_v) { + /// return std::make_shared(); + /// } else { + /// return nullptr; + /// } + /// In C++11, we use SFINAE instead of `if constexpr`. + + template < + typename T = Contained, + typename = torch::enable_if_t::value>> + std::shared_ptr default_construct() { + return std::make_shared(); + } + + template + torch::disable_if_t< + std::is_default_constructible::value, + std::shared_ptr> + default_construct() { + return nullptr; + } +}; + +/// Pretty prints the given `Module` into the `ostream`. 
+template +std::ostream& operator<<( + std::ostream& stream, + const nn::ModuleHolder& module) { + return stream << *module; +} + +/// Serializes a `ModuleHolder` into an `OutputArchive`. +template +serialize::OutputArchive& operator<<( + serialize::OutputArchive& archive, + const nn::ModuleHolder& module) { + return archive << module.ptr(); +} + +/// Deserializes a `ModuleHolder` from an `InputArchive`. +template +serialize::InputArchive& operator>>( + serialize::InputArchive& archive, + nn::ModuleHolder& module) { + return archive >> module.ptr(); +} + +} // namespace nn +} // namespace torch + +// Workaround for CUDA 10.2 and below not allowing attribute unused on +// using declarations. +#ifdef __CUDACC__ +#define TORCH_UNUSED_EXCEPT_CUDA +#else +#define TORCH_UNUSED_EXCEPT_CUDA C10_UNUSED +#endif + +/// Defines a class `Name` which inherits from `nn::ModuleHolder` to provide a +/// wrapper over a `std::shared_ptr`. +/// `Impl` is a type alias for `ImplType` which provides a way to call static +/// method of `ImplType`. +#define TORCH_MODULE_IMPL(Name, ImplType) \ + class Name : public torch::nn::ModuleHolder { /* NOLINT */ \ + public: \ + using torch::nn::ModuleHolder::ModuleHolder; \ + using Impl TORCH_UNUSED_EXCEPT_CUDA = ImplType; \ + } + +/// Like `TORCH_MODULE_IMPL`, but defaults the `ImplType` name to `Impl`. 
+#define TORCH_MODULE(Name) TORCH_MODULE_IMPL(Name, Name##Impl) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..8dbfaf5126e4f3db94174937432ea4b017354ab7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils.h @@ -0,0 +1,5 @@ +#pragma once + +#include +#include +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/clip_grad.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/clip_grad.h new file mode 100644 index 0000000000000000000000000000000000000000..e1023bd1eb5c7b88895c6dc5b349d3b4a976f226 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/clip_grad.h @@ -0,0 +1,147 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace nn { +namespace utils { + +// Clips gradient norm of a vector of Tensors. +// See +// https://pytorch.org/docs/stable/nn.html?highlight=clip_grad_norm#torch.nn.utils.clip_grad_norm_ +// for more details about this module. +// +// Difference with the python version: unlike the python version, even when +// skipping the finiteness checks (error_if_nonfinite = false), this function +// will introduce a device <=> CPU synchronization (for devices where that makes +// sense!) in order to return a CPU-side `double`. This C++ version therefore +// cannot be run fully asynchronously w.r.t. the device of the gradients. 
+inline double clip_grad_norm_( + const std::vector& parameters, + double max_norm, + double norm_type = 2.0, + bool error_if_nonfinite = false) { + std::vector params_with_grad; + + for (const auto& param : parameters) { + auto& grad = param.grad(); + if (grad.defined()) { + params_with_grad.push_back(param); + } + } + + if (params_with_grad.empty()) { + return 0.0; + } + + Tensor total_norm_tensor; + if (norm_type == std::numeric_limits::infinity()) { + std::vector norms; + norms.reserve(params_with_grad.size()); + + for (const auto& param : params_with_grad) { + norms.emplace_back(param.grad().data().abs().max()); + } + total_norm_tensor = + (norms.size() == 1) ? norms[0] : torch::max(torch::stack(norms)); + } else if (norm_type == 0) { + total_norm_tensor = + torch::full({}, static_cast(params_with_grad.size())); + } else { + std::vector norms; + norms.reserve(params_with_grad.size()); + + for (const auto& param : params_with_grad) { + norms.emplace_back(param.grad().data().norm(norm_type)); + } + total_norm_tensor = + (norms.size() == 1) ? norms[0] : torch::stack(norms).norm(norm_type); + } + + // When possible (ie when skipping the finiteness check), we avoid + // synchronizing the CPU and the gradients' device until the very end to + // preserve async execution on the device. When checking for finite-ness, this + // optional ensures we only sync once. + c10::optional total_norm = c10::nullopt; + if (error_if_nonfinite) { + total_norm = total_norm_tensor.item().toDouble(); + TORCH_CHECK( + std::isfinite(*total_norm), + "The total norm of order ", + norm_type, + " for gradients from `parameters` ", + "is non-finite, so it cannot be clipped. 
To disable this error and scale ", + "the gradients with the non-finite norm anyway, set ", + "`error_if_nonfinite=false`"); + } + + auto clip_coef = max_norm / (total_norm_tensor + 1e-6); + auto clip_coef_clamped = + torch::clamp(clip_coef, c10::nullopt /* min */, 1.0 /* max */); + for (auto& param : params_with_grad) { + param.grad().data().mul_(clip_coef_clamped); + } + + if (!total_norm.has_value()) { + total_norm = total_norm_tensor.item().toDouble(); + } + return *total_norm; +} + +// A wrapper around clip_grad_norm_ that allows us to call the function with a +// braced-init-list of Tensors. +inline double clip_grad_norm_( + std::initializer_list parameters, + double max_norm, + double norm_type = 2.0, + bool error_if_nonfinite = false) { + return clip_grad_norm_( + std::vector(parameters), max_norm, norm_type, error_if_nonfinite); +} + +// A wrapper around clip_grad_norm_ that allows us to call the function with a +// single Tensor. +inline double clip_grad_norm_( + Tensor parameter, + double max_norm, + double norm_type = 2.0, + bool error_if_nonfinite = false) { + std::vector params = {std::move(parameter)}; + return clip_grad_norm_( + std::move(params), max_norm, norm_type, error_if_nonfinite); +} + +// Clips gradient of an iterable of parameters at specified value. +// Gradients are modified in-place. +// See https://pytorch.org/docs/stable/nn.html#clip-grad-value +// for more details about this module. +inline void clip_grad_value_( + const std::vector& parameters, + double clip_value) { + for (const auto& param : parameters) { + if (param.grad().defined()) { + param.grad().data().clamp_(-clip_value, clip_value); + } + } +} + +// A wrapper around clip_grad_value_ that allows us to call the function with a +// braced-init-list of Tensors. 
+inline void clip_grad_value_( + std::initializer_list parameters, + double clip_value) { + clip_grad_value_(std::vector(parameters), clip_value); +} + +// A wrapper around clip_grad_value_ that allows us to call the function with a +// single Tensor. +inline void clip_grad_value_(Tensor parameter, double clip_value) { + std::vector params = {std::move(parameter)}; + clip_grad_value_(std::move(params), clip_value); +} + +} // namespace utils +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/convert_parameters.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/convert_parameters.h new file mode 100644 index 0000000000000000000000000000000000000000..2ac1d317c99223ae9dd8c2ca27ef7d7a20e03108 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/convert_parameters.h @@ -0,0 +1,82 @@ +#pragma once + +#include +#include + +namespace torch { +namespace nn { +namespace utils { + +// This helper function is to check if the parameters are located +// in the same device. Currently, the conversion between model parameters +// and single vector form is not supported for multiple allocations, +// e.g. parameters in different GPUs, or mixture of CPU/GPU. +inline c10::optional _check_param_device( + const torch::Tensor& param, + c10::optional old_param_device) { + // Meet the first parameter + if (old_param_device == c10::nullopt) { + old_param_device = param.is_cuda() ? 
param.get_device() : -1; + } else { + bool warn = false; + if (param.is_cuda()) { // Check if in same GPU + warn = (param.get_device() != old_param_device.value()); + } else { // Check if in CPU + warn = (old_param_device.value() != -1); + } + if (warn) { + TORCH_CHECK( + false, + "Found two parameters on different devices, ", + "this is currently not supported."); + } + } + + return old_param_device; +} + +// Convert parameters to one vector +inline torch::Tensor parameters_to_vector( + const std::vector& parameters) { + c10::optional param_device; + + std::vector vec; + vec.reserve(parameters.size()); + + for (const torch::Tensor& param : parameters) { + // Ensure the parameters are located in the same device + param_device = _check_param_device(param, param_device); + + vec.push_back(param.view(-1)); + } + + return torch::cat(vec); +} + +// Convert one vector to the parameters +inline void vector_to_parameters( + const torch::Tensor& vec, + const std::vector& parameters) { + // Flag for the device where the parameter is located + c10::optional param_device; + + // Pointer for slicing the vector for each parameter + int64_t pointer = 0; + for (const torch::Tensor& param : parameters) { + // Ensure the parameters are located in the same device + param_device = _check_param_device(param, param_device); + + // The length of the parameter + auto num_param = param.numel(); + // Slice the vector, reshape it, and replace the old data of the parameter + param.set_data( + vec.slice(0, pointer, pointer + num_param).view_as(param).data()); + + // Increment the pointer + pointer += num_param; + } +} + +} // namespace utils +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/rnn.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/rnn.h new file mode 100644 index 
0000000000000000000000000000000000000000..bb6bd753caadde88acbb76d447d4e9305493da85 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils/rnn.h @@ -0,0 +1,353 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace nn { +namespace utils { +namespace rnn { + +inline Tensor invert_permutation(const Tensor& permutation) { + if (!permutation.defined()) { + return torch::Tensor(); + } + Tensor output = + torch::empty_like(permutation, torch::MemoryFormat::Contiguous); + output.scatter_( + 0, + permutation, + torch::arange(0, permutation.numel(), permutation.device())); + return output; +} + +/// Holds the data and list of `batch_sizes` of a packed sequence. +/// +/// All RNN modules accept packed sequences as inputs. +/// +/// Note: +/// Instances of this class should never be created manually. They are meant +/// to be instantiated by functions like `pack_padded_sequence`. +/// +/// Batch sizes represent the number elements at each sequence step in +/// the batch, not the varying sequence lengths passed to +/// `pack_padded_sequence`. For instance, given data ``abc`` and ``x`` +/// the :class:`PackedSequence` would contain data ``axbc`` with +/// ``batch_sizes=[2,1,1]``. +/// +/// Attributes: +/// data (Tensor): Tensor containing packed sequence +/// batch_sizes (Tensor): Tensor of integers holding +/// information about the batch size at each sequence step +/// sorted_indices (Tensor, optional): Tensor of integers holding how this +/// :class:`PackedSequence` is constructed from sequences. +/// unsorted_indices (Tensor, optional): Tensor of integers holding how this +/// to recover the original sequences with correct order. +/// +/// .. note:: +/// `data` can be on arbitrary device and of arbitrary dtype. +/// `sorted_indices` and `unsorted_indices` must be ``torch::kInt64`` +/// tensors on the same device as `data`. 
+/// +/// However, `batch_sizes` should always be a CPU ``torch::kInt64`` tensor. +/// +/// This invariant is maintained throughout `PackedSequence` class, +/// and all functions that construct a `PackedSequence` in libtorch +/// (i.e., they only pass in tensors conforming to this constraint). +class PackedSequence { + public: + explicit PackedSequence( + Tensor data, + Tensor batch_sizes, + Tensor sorted_indices = {}, + Tensor unsorted_indices = {}) { + // NB: if unsorted_indices is provided, it should be the inverse permutation + // to sorted_indices. Don't assert it here because the PackedSequence ctor + // should only be used internally. + if (!unsorted_indices.defined()) { + unsorted_indices = invert_permutation(sorted_indices); + } + TORCH_CHECK( + batch_sizes.device().type() == kCPU, + "batch_sizes should always be on CPU. " + "Instances of PackedSequence should never be created manually. " + "They should be instantiated by functions like pack_sequence " + "and pack_padded_sequences in nn::utils::rnn. " + "https://pytorch.org/docs/stable/nn.html#torch.nn.utils.rnn.pack_sequence"); + data_ = std::move(data); + batch_sizes_ = std::move(batch_sizes); + sorted_indices_ = std::move(sorted_indices); + unsorted_indices_ = std::move(unsorted_indices); + } + + const Tensor& data() const { + return data_; + } + + const Tensor& batch_sizes() const { + return batch_sizes_; + } + + const Tensor& sorted_indices() const { + return sorted_indices_; + } + + const Tensor& unsorted_indices() const { + return unsorted_indices_; + } + + PackedSequence pin_memory() const { + // Why not convert `batch_sizes`? + // See NOTE [ device and dtype of a PackedSequence ] + return PackedSequence( + data_.pin_memory(), + batch_sizes_, + sorted_indices_.defined() ? sorted_indices_.pin_memory() : Tensor(), + unsorted_indices_.defined() ? 
unsorted_indices_.pin_memory() + : Tensor()); + } + + PackedSequence to(TensorOptions options) const { + // Performs dtype and/or device conversion on `data_`. + // + // If the ``data_`` Tensor already has the correct `torch::Dtype` + // and `torch::Device`, then ``self`` is returned. + // Otherwise, returns a copy with the desired configuration. + + // Why not convert `batch_sizes`? + // See NOTE [ device and dtype of a PackedSequence ] + Tensor data = data_.to(options); + if (data.is_same(data_)) { + return *this; + } else { + // Does not forward device or dtype args, device is set from data.device() + Tensor sorted_indices = sorted_indices_.defined() + ? sorted_indices_.to( + options.device(data.device()).dtype(sorted_indices_.dtype())) + : Tensor(); + Tensor unsorted_indices = unsorted_indices_.defined() + ? unsorted_indices_.to( + options.device(data.device()).dtype(unsorted_indices_.dtype())) + : Tensor(); + return PackedSequence( + std::move(data), + batch_sizes_, + std::move(sorted_indices), + std::move(unsorted_indices)); + } + } + + PackedSequence cuda() const { + return to(kCUDA); + } + + PackedSequence cpu() const { + return to(kCPU); + } + + /// Returns true if `data_` stored on a gpu + bool is_cuda() const { + return data_.is_cuda(); + } + + /// Returns true if `data_` stored on in pinned memory + bool is_pinned() const { + return data_.is_pinned(); + } + + private: + Tensor data_; + Tensor batch_sizes_; + Tensor sorted_indices_; + Tensor unsorted_indices_; +}; + +/// Packs a Tensor containing padded sequences of variable length. +/// +/// `input` can be of size ``T x B x *`` where `T` is the length of the +/// longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and +/// ``*`` is any number of dimensions (including 0). If ``batch_first`` is +/// ``true``, ``B x T x *`` `input` is expected. +/// +/// For unsorted sequences, use `enforce_sorted = false`. 
If `enforce_sorted` is +/// ``true``, the sequences should be sorted by length in a decreasing order, +/// i.e. +/// ``input[:,0]`` should be the longest sequence, and ``input[:,B-1]`` the +/// shortest one. +/// +/// Note: +/// This function accepts any input that has at least two dimensions. You +/// can apply it to pack the labels, and use the output of the RNN with +/// them to compute the loss directly. A Tensor can be retrieved from +/// a `PackedSequence` object by calling its ``.data()`` function. +/// +/// Arguments: +/// input (Tensor): padded batch of variable length sequences. +/// lengths (Tensor): list of sequences lengths of each batch element. +/// batch_first (bool, optional): if ``true``, the input is expected in ``B +/// x T x *`` +/// format. Default: ``false``. +/// enforce_sorted (bool, optional): if ``true``, the input is expected to +/// contain sequences sorted by length in a decreasing order. If +/// ``false``, this condition is not checked. Default: ``true``. +/// +/// Returns: +/// a `PackedSequence` object +inline PackedSequence pack_padded_sequence( + Tensor input, + Tensor lengths, + bool batch_first = false, + bool enforce_sorted = true) { + lengths = lengths.to(kInt64); + Tensor sorted_indices; + if (enforce_sorted) { + sorted_indices = Tensor(); + } else { + std::tie(lengths, sorted_indices) = + torch::sort(lengths, /*dim=*/-1, /*descending=*/true); + sorted_indices = sorted_indices.to(input.device()); + int64_t batch_dim = batch_first ? 0 : 1; + input = input.index_select(batch_dim, sorted_indices); + } + + Tensor data, batch_sizes; + std::tie(data, batch_sizes) = + torch::_pack_padded_sequence(input, lengths, batch_first); + return PackedSequence( + std::move(data), std::move(batch_sizes), std::move(sorted_indices), {}); +} + +/// Pads a packed batch of variable length sequences. +/// +/// It is an inverse operation to `pack_padded_sequence`. 
+/// +/// The returned Tensor's data will be of size ``T x B x *``, where `T` is the +/// length of the longest sequence and `B` is the batch size. If ``batch_first`` +/// is true, the data will be transposed into ``B x T x *`` format. +/// +/// Batch elements will be ordered decreasingly by their length. +/// +/// Arguments: +/// sequence (PackedSequence): batch to pad +/// batch_first (bool, optional): if ``true``, the output will be in ``B x T +/// x *`` +/// format. +/// padding_value (double, optional): values for padded elements. +/// total_length (int64_t, optional): if specified, the output will be +/// padded to +/// have length `total_length`. This method will throw error +/// if `total_length` is less than the max sequence length in +/// `sequence`. +/// +/// Returns: +/// Tuple of Tensor containing the padded sequence, and a Tensor +/// containing the list of lengths of each sequence in the batch. +inline std::tuple pad_packed_sequence( + PackedSequence sequence, + bool batch_first = false, + double padding_value = 0.0, + c10::optional total_length = torch::nullopt) { + int64_t max_seq_length = sequence.batch_sizes().size(0); + if (total_length.has_value()) { + int64_t total_length_val = total_length.value(); + TORCH_CHECK( + total_length_val >= max_seq_length, + "Expected total_length to be at least the length " + "of the longest sequence in input, but got " + "total_length=", + total_length_val, + " and max sequence length being ", + max_seq_length); + max_seq_length = total_length_val; + } + Tensor padded_output, lengths; + std::tie(padded_output, lengths) = torch::_pad_packed_sequence( + sequence.data(), + sequence.batch_sizes(), + batch_first, + padding_value, + max_seq_length); + const Tensor& unsorted_indices = sequence.unsorted_indices(); + if (unsorted_indices.defined()) { + int64_t batch_dim = batch_first ? 
0 : 1; + return std::make_tuple( + padded_output.index_select(batch_dim, unsorted_indices), + lengths.index({unsorted_indices})); + } + return std::make_tuple(padded_output, lengths); +} + +/// Pad a list of variable length Tensors with ``padding_value`` +/// +/// ``pad_sequence`` stacks a list of Tensors along a new dimension, +/// and pads them to equal length. For example, if the input is list of +/// sequences with size ``L x *`` and if batch_first is false, and ``T x B x *`` +/// otherwise. +/// +/// `B` is batch size. It is equal to the number of elements in ``sequences``. +/// `T` is length of the longest sequence. +/// `L` is length of the sequence. +/// `*` is any number of trailing dimensions, including none. +/// +/// Note: +/// This function returns a Tensor of size ``T x B x *`` or ``B x T x *`` +/// where `T` is the length of the longest sequence. This function assumes +/// trailing dimensions and type of all the Tensors in sequences are same. +/// +/// Arguments: +/// sequences (torch::ArrayRef): list of variable length sequences. +/// batch_first (bool, optional): output will be in ``B x T x *`` if true, +/// or in +/// ``T x B x *`` otherwise +/// padding_value (double, optional): value for padded elements. Default: 0. +/// +/// Returns: +/// Tensor of size ``T x B x *`` if `batch_first` is ``false``. +/// Tensor of size ``B x T x *`` otherwise +inline Tensor pad_sequence( + ArrayRef sequences, + bool batch_first = false, + double padding_value = 0) { + return at::pad_sequence(sequences, batch_first, padding_value); +} + +/// Packs a list of variable length Tensors +/// +/// ``sequences`` should be a list of Tensors of size ``L x *``, where `L` is +/// the length of a sequence and `*` is any number of trailing dimensions, +/// including zero. +/// +/// For unsorted sequences, use `enforce_sorted = false`. If ``enforce_sorted`` +/// is ``true``, the sequences should be sorted in the order of decreasing +/// length. 
+/// +/// +/// Arguments: +/// sequences (torch::ArrayRef): A list of sequences of decreasing +/// length. enforce_sorted (bool, optional): if ``true``, checks that the +/// input +/// contains sequences sorted by length in a decreasing order. If +/// ``false``, this condition is not checked. Default: ``true``. +/// +/// Returns: +/// a `PackedSequence` object +inline PackedSequence pack_sequence( + ArrayRef sequences, + bool enforce_sorted = true) { + Tensor lengths = torch::empty({(int64_t)sequences.size()}, kInt64); + for (const auto i : c10::irange(sequences.size())) { + lengths[i] = sequences[i].size(0); + } + return pack_padded_sequence( + at::pad_sequence(sequences), + std::move(lengths), + /*batch_first=*/false, + /*enforce_sorted=*/enforce_sorted); +} + +} // namespace rnn +} // namespace utils +} // namespace nn +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim.h new file mode 100644 index 0000000000000000000000000000000000000000..8aef6238ebaa3d9b91b827c88fe5d4c5eb0c765a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/ordered_dict.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/ordered_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..31a2ab65131c1c575591a5453773fe17b3200b82 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/ordered_dict.h @@ -0,0 +1,516 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +/// An ordered dictionary 
implementation, akin to Python's `OrderedDict`. +template +class OrderedDict { + public: + /// A (key, value) pair. + class Item; + + // The lifetime of an iterator is bound to the lifetime of the `OrderedDict`. + // Further, any `insert()` operation may invalidate all iterators + // pointing into the vector. + using Iterator = typename std::vector::iterator; + using ConstIterator = typename std::vector::const_iterator; + + /// Constructs the `OrderedDict` with a short description of the kinds of keys + /// stored in the `OrderedDict`. This description is used in error messages + /// thrown by the `OrderedDict`. + explicit OrderedDict(std::string key_description = "Key"); + + /// Copy constructs this `OrderedDict` from `other`. + OrderedDict(const OrderedDict& other); + + /// Assigns items from `other` to this `OrderedDict`. + OrderedDict& operator=(const OrderedDict& other); + + // NB: Move works by default, because you can move-construct vectors of const + // values. I tried to make this noexcept (conditional on the move constructors + // of index_ and items_ being noexcept) but the obvious spelling didn't + // compile on Windows. + OrderedDict(OrderedDict&& other) noexcept = default; + OrderedDict& operator=(OrderedDict&& other) noexcept = default; + + ~OrderedDict() = default; + + /// Constructs a new `OrderedDict` and pre-populates it with the given + /// `Item`s. + /*implicit */ OrderedDict(std::initializer_list initializer_list); + + /// Returns the key description string the `OrderedDict` was constructed with. + const std::string& key_description() const noexcept; + + // Element Access + + /// Returns the very first item in the `OrderedDict` and throws an exception + /// if it is empty. + Item& front(); + + /// Returns the very first item in the `OrderedDict` and throws an exception + /// if it is empty. + const Item& front() const; + + /// Returns the very last item in the `OrderedDict` and throws an exception + /// if it is empty. 
+ Item& back(); + + /// Returns the very last item in the `OrderedDict` and throws an exception + /// if it is empty. + const Item& back() const; + + /// Returns the item at the `index`-th position in the `OrderedDict`. Throws + /// an exception if the index is out of bounds. + Item& operator[](size_t index); + + /// Returns the item at the `index`-th position in the `OrderedDict`. Throws + /// an exception if the index is out of bounds. + const Item& operator[](size_t index) const; + + /// Returns the value associated with the given `key`. Throws an exception if + /// no such key is stored in the `OrderedDict`. Use `find()` for a + /// non-throwing way of accessing a value if it is present. + Value& operator[](const Key& key); + + /// Returns the value associated with the given `key`. Throws an exception if + /// no such key is stored in the `OrderedDict`. Use `find()` for a + /// non-throwing way of accessing a value if it is present. + const Value& operator[](const Key& key) const; + + // Lookup + + /// Returns a pointer to the value associated with the given key, or a + /// `nullptr` if no such key is stored in the `OrderedDict`. + Value* find(const Key& key) noexcept; + + /// Returns a pointer to the value associated with the given key, or a + /// `nullptr` if no such key is stored in the `OrderedDict`. + const Value* find(const Key& key) const noexcept; + + /// Returns true if the key is present in the `OrderedDict`. + bool contains(const Key& key) const noexcept; + + // Iterators + + /// Returns an iterator to the first item in the `OrderedDict`. Iteration is + /// ordered. + Iterator begin(); + + /// Returns an iterator to the first item in the `OrderedDict`. Iteration is + /// ordered. + ConstIterator begin() const; + + /// Returns an iterator one past the last item in the `OrderedDict`. + Iterator end(); + + /// Returns an iterator one past the last item in the `OrderedDict`. 
+ ConstIterator end() const; + + // Capacity + + /// Returns the number of items currently stored in the `OrderedDict`. + size_t size() const noexcept; + + /// Returns true if the `OrderedDict` contains no elements. + bool is_empty() const noexcept; + + /// Resizes internal storage to fit at least `requested_capacity` items + /// without requiring reallocation. + void reserve(size_t requested_capacity); + + // Modifiers + + /// Inserts a new `(key, value)` pair into the `OrderedDict`. Throws an + /// exception if the key is already present. If insertion is successful, + /// immediately returns a reference to the inserted value. + template + Value& insert(K&& key, V&& value); + + /// Inserts a new `(key, value)` pair into the `OrderedDict`. Throws an + /// exception if the key is already present. If insertion is successful, + /// immediately returns a reference to the inserted value. + Value& insert(Key key, Value&& value); + + /// Inserts all items from `other` into this `OrderedDict`. If any key from + /// `other` is already present in this `OrderedDict`, an exception is thrown. + void update(OrderedDict&& other); + + /// Inserts all items from `other` into this `OrderedDict`. If any key from + /// `other` is already present in this `OrderedDict`, an exception is thrown. + void update(const OrderedDict& other); + + /// Removes the item that has `key` from this `OrderedDict` if exists and if + /// it doesn't an exception is thrown. + void erase(const Key& key); + + /// Removes all items from this `OrderedDict`. + void clear(); + + // Observers + + /// Returns the items stored in the `OrderedDict`. + const std::vector& items() const noexcept; + + /// Returns a newly allocated vector and copies all keys from this + /// `OrderedDict` into the vector. + ::std::vector keys() const; + + /// Returns a newly allocated vector and copies all values from this + /// `OrderedDict` into the vector. 
+ ::std::vector values() const; + + /// Returns a newly allocated vector and copies all keys and values from this + /// `OrderedDict` into a vector of `std::pair`. + ::std::vector> pairs() const; + + /// Returns true if both dicts contain the same keys and values, in the same + /// order. + template + friend bool operator==( + const OrderedDict& a, + const OrderedDict& b); + + private: + /// A mapping from a key to an index into the `items_` vector. + ::std::unordered_map index_; + + /// The items stored in the `OrderedDict`. + ::std::vector items_; + + /// A description of the keys stored in the `OrderedDict`. + ::std::string key_description_{"Key"}; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict::Item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template +class OrderedDict::Item { + public: + /// Constructs a new item. + Item(Key key, Value value) : pair_(std::move(key), std::move(value)) {} + + /// Returns a reference to the value. + Value& operator*() { + return value(); + } + + /// Returns a reference to the value. + const Value& operator*() const { + return value(); + } + + /// Allows access to the value using the arrow operator. + Value* operator->() { + return &value(); + } + + /// Allows access to the value using the arrow operator. + const Value* operator->() const { + return &value(); + } + + /// Returns a reference to the key. + const Key& key() const noexcept { + return pair_.first; + } + + /// Returns a reference to the value. + Value& value() noexcept { + return pair_.second; + } + + /// Returns a reference to the value. + const Value& value() const noexcept { + return pair_.second; + } + + /// Returns a `(key, value)` pair. + const std::pair& pair() const noexcept { + return pair_; + } + + private: + /// This is stored as an std::pair because it will make Python binding a lot, + /// lot easier. 
+ ::std::pair pair_; +}; + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template +OrderedDict::OrderedDict(std::string key_description) + : key_description_(std::move(key_description)) {} + +template +OrderedDict::OrderedDict(const OrderedDict& other) + : index_(other.index_), key_description_(other.key_description_) { + // Copy we have to do ourselves, because items' keys are const, so we have to + // re-insert the items. + for (const auto& item : other.items_) { + items_.push_back(item); + } +} + +template +OrderedDict& OrderedDict::operator=( + const OrderedDict& other) { + index_ = other.index_; + items_.clear(); + for (auto& item : other.items_) { + items_.push_back(item); + } + key_description_ = other.key_description_; + return *this; +} + +template +OrderedDict::OrderedDict( + std::initializer_list initializer_list) + : OrderedDict("Key") { + items_.reserve(initializer_list.size()); + for (auto& item : initializer_list) { + // Copy the key here and move it into the index. 
+ items_.emplace_back(item.key(), std::move(item.value())); + index_.emplace(std::move(item.key()), size() - 1); + } +} + +template +typename OrderedDict::Iterator OrderedDict::begin() { + return items_.begin(); +} + +template +typename OrderedDict::ConstIterator OrderedDict::begin() + const { + return items_.begin(); +} + +template +typename OrderedDict::Iterator OrderedDict::end() { + return items_.end(); +} + +template +typename OrderedDict::ConstIterator OrderedDict::end() + const { + return items_.end(); +} + +template +typename OrderedDict::Item& OrderedDict::front() { + TORCH_CHECK(!items_.empty(), "Called front() on an empty OrderedDict"); + return items_.front(); +} + +template +const typename OrderedDict::Item& OrderedDict::front() + const { + TORCH_CHECK(!items_.empty(), "Called front() on an empty OrderedDict"); + return items_.front(); +} + +template +typename OrderedDict::Item& OrderedDict::back() { + TORCH_CHECK(!items_.empty(), "Called back() on an empty OrderedDict"); + return items_.back(); +} + +template +const typename OrderedDict::Item& OrderedDict::back() + const { + TORCH_CHECK(!items_.empty(), "Called back() on an empty OrderedDict"); + return items_.back(); +} + +template +typename OrderedDict::Item& OrderedDict::operator[]( + size_t index) { + TORCH_CHECK(index < items_.size(), "Index ", index, " is out of bounds"); + return items_[index]; +} + +template +const typename OrderedDict::Item& OrderedDict:: +operator[](size_t index) const { + TORCH_CHECK(index < items_.size(), "Index ", index, " is out of bounds"); + return items_[index]; +} + +template +Value& OrderedDict::operator[](const Key& key) { + if (auto* value = find(key)) { + return *value; + } + AT_ERROR(key_description_, " '", key, "' is not defined"); +} + +template +const Value& OrderedDict::operator[](const Key& key) const { + if (auto* value = find(key)) { + return *value; + } + AT_ERROR(key_description_, " '", key, "' is not defined"); +} + +template +template +Value& 
OrderedDict::insert(K&& key, V&& value) { + TORCH_CHECK( + index_.count(key) == 0, key_description_, " '", key, "' already defined"); + // Copy `key` here and move it into the index. + items_.emplace_back(key, std::forward(value)); + index_.emplace(std::forward(key), size() - 1); + return items_.back().value(); +} + +template +Value& OrderedDict::insert(Key key, Value&& value) { + return insert(std::move(key), std::move(value)); +} + +template +void OrderedDict::update(OrderedDict&& other) { + reserve(size() + other.size()); + for (auto& item : other) { + // We want to call `insert()` to prevent duplicate keys. + insert(std::move(item.key()), std::move(item.value())); + } +} + +template +void OrderedDict::update(const OrderedDict& other) { + reserve(size() + other.size()); + for (auto& item : other) { + // We want to call `insert()` to prevent duplicate keys. + insert(item.key(), item.value()); + } +} + +template +Value* OrderedDict::find(const Key& key) noexcept { + auto iterator = index_.find(key); + if (iterator == index_.end()) { + return nullptr; + } + return &items_[iterator->second].value(); +} + +template +const Value* OrderedDict::find(const Key& key) const noexcept { + auto iterator = index_.find(key); + if (iterator == index_.end()) { + return nullptr; + } + return &items_[iterator->second].value(); +} + +template +void OrderedDict::erase(const Key& key) { + auto it = index_.find(key); + TORCH_CHECK(it != index_.end(), "Key '", key, "' doesn't exist"); + + auto index = it->second; + index_.erase(it); + items_.erase(items_.begin() + index); + + for (auto& pair : index_) + if (pair.second > index) + --pair.second; +} + +template +bool OrderedDict::contains(const Key& key) const noexcept { + return find(key) != nullptr; +} + +template +void OrderedDict::clear() { + index_.clear(); + items_.clear(); +} + +template +size_t OrderedDict::size() const noexcept { + return items_.size(); +} + +template +bool OrderedDict::is_empty() const noexcept { + return 
items_.empty(); +} + +template +const std::string& OrderedDict::key_description() const noexcept { + return key_description_; +} + +template +const std::vector::Item>& OrderedDict< + Key, + Value>::items() const noexcept { + return items_; +} + +template +::std::vector OrderedDict::keys() const { + std::vector keys; + keys.reserve(size()); + for (const auto& item : items_) { + keys.push_back(item.key()); + } + return keys; +} + +template +::std::vector OrderedDict::values() const { + std::vector values; + values.reserve(size()); + for (const auto& item : items_) { + values.push_back(item.value()); + } + return values; +} + +template +::std::vector> OrderedDict::pairs() const { + std::vector> values; + values.reserve(size()); + for (const auto& item : items_) { + values.push_back(item.pair()); + } + return values; +} + +template +void OrderedDict::reserve(size_t requested_capacity) { + index_.reserve(requested_capacity); + items_.reserve(requested_capacity); +} + +template +bool operator==( + const torch::OrderedDict& a, + const torch::OrderedDict& b) { + using Item = typename torch::OrderedDict::Item; + if (a.index_ != b.index_) + return false; + if (a.items_.size() != b.items_.size()) + return false; + // NOTE: There's no point in comparing keys for items_, as we already know + // that index is equal. 
+ return std::equal( + a.items_.begin(), + a.items_.end(), + b.items_.begin(), + [](const Item& a, const Item& b) { return a.value() == b.value(); }); +} + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/python.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/python.h new file mode 100644 index 0000000000000000000000000000000000000000..15902a026cf597c4f1eacf0063e65b9f0fa2948e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/python.h @@ -0,0 +1,262 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace python { +namespace detail { +inline Device py_object_to_device(py::object object) { + PyObject* obj = object.ptr(); + if (THPDevice_Check(obj)) { + return reinterpret_cast(obj)->device; + } + throw TypeError("Expected device"); +} + +inline Dtype py_object_to_dtype(py::object object) { + PyObject* obj = object.ptr(); + if (THPDtype_Check(obj)) { + return reinterpret_cast(obj)->scalar_type; + } + throw TypeError("Expected dtype"); +} + +template +using PyModuleClass = + py::class_>; + +/// Dynamically creates a subclass of `torch.nn.cpp.ModuleWrapper` that is also +/// a subclass of `torch.nn.Module`, and passes it the user-provided C++ module +/// to which it delegates all calls. +template +void bind_cpp_module_wrapper( + py::module module, + PyModuleClass cpp_class, + const char* name) { + // Grab the `torch.nn.cpp.ModuleWrapper` class, which we'll subclass + // with a dynamically created class below. + py::object cpp_module = + py::module::import("torch.nn.cpp").attr("ModuleWrapper"); + + // Grab the `type` class which we'll use as a metaclass to create a new class + // dynamically. 
+ py::object type_metaclass = + py::reinterpret_borrow((PyObject*)&PyType_Type); + + // The `ModuleWrapper` constructor copies all functions to its own `__dict__` + // in its constructor, but we do need to give our dynamic class a constructor. + // Inside, we construct an instance of the original C++ module we're binding + // (the `torch::nn::Module` subclass), and then forward it to the + // `ModuleWrapper` constructor. + py::dict attributes; + + // `type()` always needs a `str`, but pybind11's `str()` method always creates + // a `unicode` object. + py::object name_str = py::str(name); + + // Dynamically create the subclass of `ModuleWrapper`, which is a subclass of + // `torch.nn.Module`, and will delegate all calls to the C++ module we're + // binding. + py::object wrapper_class = + type_metaclass(name_str, py::make_tuple(cpp_module), attributes); + + // The constructor of the dynamic class calls `ModuleWrapper.__init__()`, + // which replaces its methods with those of the C++ module. + wrapper_class.attr("__init__") = py::cpp_function( + [cpp_module, cpp_class]( + py::object self, py::args args, py::kwargs kwargs) { + cpp_module.attr("__init__")(self, cpp_class(*args, **kwargs)); + }, + py::is_method(wrapper_class)); + + // Calling `my_module.my_class` now means that `my_class` is a subclass of + // `ModuleWrapper`, and whose methods call into the C++ module we're binding. + module.attr(name) = wrapper_class; +} +} // namespace detail + +/// Adds method bindings for a pybind11 `class_` that binds an `nn::Module` +/// subclass. +/// +/// Say you have a pybind11 class object created with `py::class_(m, +/// "Net")`. This function will add all the necessary `.def()` calls to bind the +/// `nn::Module` base class' methods, such as `train()`, `eval()` etc. into +/// Python. +/// +/// Users should prefer to use `bind_module` if possible. 
+template +py::class_ add_module_bindings( + py::class_ module) { + // clang-format off + return module + .def("train", + [](ModuleType& module, bool mode) { module.train(mode); }, + py::arg("mode") = true) + .def("eval", [](ModuleType& module) { module.eval(); }) + .def("clone", [](ModuleType& module) { return module.clone(); }) + .def_property_readonly( + "training", [](ModuleType& module) { return module.is_training(); }) + .def("zero_grad", [](ModuleType& module) { module.zero_grad(); }) + .def_property_readonly( "_parameters", [](ModuleType& module) { + return module.named_parameters(/*recurse=*/false); + }) + .def("parameters", [](ModuleType& module, bool recurse) { + return module.parameters(recurse); + }, + py::arg("recurse") = true) + .def("named_parameters", [](ModuleType& module, bool recurse) { + return module.named_parameters(recurse); + }, + py::arg("recurse") = true) + .def_property_readonly("_buffers", [](ModuleType& module) { + return module.named_buffers(/*recurse=*/false); + }) + .def("buffers", [](ModuleType& module, bool recurse) { + return module.buffers(recurse); }, + py::arg("recurse") = true) + .def("named_buffers", [](ModuleType& module, bool recurse) { + return module.named_buffers(recurse); + }, + py::arg("recurse") = true) + .def_property_readonly( + "_modules", [](ModuleType& module) { return module.named_children(); }) + .def("modules", [](ModuleType& module) { return module.modules(); }) + .def("named_modules", + [](ModuleType& module, py::object /* unused */, std::string prefix, bool remove_duplicate /* unused */) { + return module.named_modules(std::move(prefix)); + }, + py::arg("memo") = py::none(), + py::arg("prefix") = std::string(), + py::arg("remove_duplicate") = true) + .def("children", [](ModuleType& module) { return module.children(); }) + .def("named_children", + [](ModuleType& module) { return module.named_children(); }) + .def("to", [](ModuleType& module, py::object object, bool non_blocking) { + if 
(THPDevice_Check(object.ptr())) { + module.to( + reinterpret_cast(object.ptr())->device, + non_blocking); + } else { + module.to(detail::py_object_to_dtype(object), non_blocking); + } + }, + py::arg("dtype_or_device"), + py::arg("non_blocking") = false) + .def("to", + [](ModuleType& module, + py::object device, + py::object dtype, + bool non_blocking) { + if (device.is_none()) { + module.to(detail::py_object_to_dtype(dtype), non_blocking); + } else if (dtype.is_none()) { + module.to(detail::py_object_to_device(device), non_blocking); + } else { + module.to( + detail::py_object_to_device(device), + detail::py_object_to_dtype(dtype), + non_blocking); + } + }, + py::arg("device"), + py::arg("dtype"), + py::arg("non_blocking") = false) + .def("cuda", [](ModuleType& module) { module.to(kCUDA); }) + .def("cpu", [](ModuleType& module) { module.to(kCPU); }) + .def("float", [](ModuleType& module) { module.to(kFloat32); }) + .def("double", [](ModuleType& module) { module.to(kFloat64); }) + .def("half", [](ModuleType& module) { module.to(kFloat16); }) + .def("__str__", [](ModuleType& module) { return module.name(); }) + .def("__repr__", [](ModuleType& module) { return module.name(); }); + // clang-format on +} + +/// Creates a pybind11 class object for an `nn::Module` subclass type and adds +/// default bindings. +/// +/// After adding the default bindings, the class object is returned, such that +/// you can add more bindings. +/// +/// Example usage: +/// \rst +/// .. 
code-block:: cpp +/// +/// struct Net : torch::nn::Module { +/// Net(int in, int out) { } +/// torch::Tensor forward(torch::Tensor x) { return x; } +/// }; +/// +/// PYBIND11_MODULE(my_module, m) { +/// torch::python::bind_module(m, "Net") +/// .def(py::init()) +/// .def("forward", &Net::forward); +/// } +/// \endrst +template +torch::disable_if_t< + torch::detail::has_forward::value && !force_enable, + detail::PyModuleClass> +bind_module(py::module module, const char* name) { + py::module cpp = module.def_submodule("cpp"); + auto cpp_class = + add_module_bindings(detail::PyModuleClass(cpp, name)); + detail::bind_cpp_module_wrapper(module, cpp_class, name); + return cpp_class; +} + +/// Creates a pybind11 class object for an `nn::Module` subclass type and adds +/// default bindings. +/// +/// After adding the default bindings, the class object is returned, such that +/// you can add more bindings. +/// +/// If the class has a `forward()` method, it is automatically exposed as +/// `forward()` and `__call__` in Python. +/// +/// Example usage: +/// \rst +/// .. 
code-block:: cpp +/// +/// struct Net : torch::nn::Module { +/// Net(int in, int out) { } +/// torch::Tensor forward(torch::Tensor x) { return x; } +/// }; +/// +/// PYBIND11_MODULE(my_module, m) { +/// torch::python::bind_module(m, "Net") +/// .def(py::init()) +/// .def("forward", &Net::forward); +/// } +/// \endrst +template < + typename ModuleType, + typename = + torch::enable_if_t::value>> +detail::PyModuleClass bind_module( + py::module module, + const char* name) { + return bind_module(module, name) + .def("forward", &ModuleType::forward) + .def("__call__", &ModuleType::forward); +} +} // namespace python +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize.h new file mode 100644 index 0000000000000000000000000000000000000000..60ec25b8ffe7924249afcdbb6d50b6d353850ceb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize.h @@ -0,0 +1,144 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch { + +/// Serializes the given `value`. +/// There must be an overload of `operator<<` between `serialize::OutputArchive` +/// and `Value` for this method to be well-formed. Currently, such an overload +/// is provided for (subclasses of): +/// +/// - `torch::nn::Module`, +/// - `torch::optim::Optimizer` +/// - `torch::Tensor` +/// +/// To perform the serialization, a `serialize::OutputArchive` is constructed, +/// and all arguments after the `value` are forwarded to its `save_to` method. +/// For example, you can pass a filename, or an `ostream`. +/// +/// \rst +/// .. 
code-block:: cpp +/// +/// torch::nn::Linear model(3, 4); +/// torch::save(model, "model.pt"); +/// +/// torch::optim::SGD sgd(/*lr=*/0.9); +/// std::ostringstream stream; +/// // Note that the same stream cannot be used in multiple torch::save(...) +/// // invocations, otherwise the header will be corrupted. +/// torch::save(sgd, stream); +/// +/// auto tensor = torch::ones({3, 4}); +/// torch::save(tensor, "my_tensor.pt"); +/// \endrst +template +void save(const Value& value, SaveToArgs&&... args) { + serialize::OutputArchive archive(std::make_shared()); + archive << value; + archive.save_to(std::forward(args)...); +} + +/// Serializes the given `tensor_vec` of type `std::vector`. +/// +/// To perform the serialization, a `serialize::OutputArchive` is constructed, +/// and all arguments after the `tensor_vec` are forwarded to its `save_to` +/// method. For example, you can pass a filename, or an `ostream`. +/// +/// \rst +/// .. code-block:: cpp +/// +/// std::vector tensor_vec = { torch::randn({1, 2}), +/// torch::randn({3, 4}) }; torch::save(tensor_vec, "my_tensor_vec.pt"); +/// +/// std::vector tensor_vec = { torch::randn({5, 6}), +/// torch::randn({7, 8}) }; std::ostringstream stream; +/// // Note that the same stream cannot be used in multiple torch::save(...) +/// // invocations, otherwise the header will be corrupted. +/// torch::save(tensor_vec, stream); +/// \endrst +template +void save(const std::vector& tensor_vec, SaveToArgs&&... args) { + serialize::OutputArchive archive(std::make_shared()); + for (const auto i : c10::irange(tensor_vec.size())) { + auto& value = tensor_vec[i]; + archive.write(std::to_string(i), value); + } + archive.save_to(std::forward(args)...); +} + +TORCH_API std::vector pickle_save(const torch::IValue& ivalue); +TORCH_API torch::IValue pickle_load(const std::vector& data); + +/// Deserializes the given `value`. 
+/// There must be an overload of `operator>>` between `serialize::InputArchive` +/// and `Value` for this method to be well-formed. Currently, such an overload +/// is provided for (subclasses of): +/// +/// - `torch::nn::Module`, +/// - `torch::optim::Optimizer` +/// - `torch::Tensor` +/// +/// To perform the serialization, a `serialize::InputArchive` is constructed, +/// and all arguments after the `value` are forwarded to its `load_from` method. +/// For example, you can pass a filename, or an `istream`. +/// +/// \rst +/// .. code-block:: cpp +/// +/// torch::nn::Linear model(3, 4); +/// torch::load(model, "model.pt"); +/// +/// torch::optim::SGD sgd(/*lr=*/0.9); +/// std::istringstream stream("..."); +/// torch::load(sgd, stream); +/// +/// auto tensor = torch::ones({3, 4}); +/// torch::load(tensor, "my_tensor.pt"); +/// \endrst +template +void load(Value& value, LoadFromArgs&&... args) { + serialize::InputArchive archive; + archive.load_from(std::forward(args)...); + archive >> value; +} + +/// Deserializes the given `tensor_vec` of type `std::vector`. +/// +/// To perform the serialization, a `serialize::InputArchive` is constructed, +/// and all arguments after the `value` are forwarded to its `load_from` method. +/// For example, you can pass a filename, or an `istream`. +/// +/// \rst +/// .. code-block:: cpp +/// +/// std::vector tensor_vec; +/// torch::load(tensor_vec, "my_tensor_vec.pt"); +/// +/// std::vector tensor_vec; +/// std::istringstream stream("..."); +/// torch::load(tensor_vec, stream); +/// \endrst +template +void load(std::vector& tensor_vec, LoadFromArgs&&... args) { + serialize::InputArchive archive; + archive.load_from(std::forward(args)...); + + // NOTE: The number of elements in the serialized `std::vector` + // is not known ahead of time, so we need a while-loop to increment the index, + // and use `archive.try_read(...)` to check whether we have reached the end of + // the serialized `std::vector`. 
+ size_t index = 0; + torch::Tensor value; + while (archive.try_read(std::to_string(index), value)) { + tensor_vec.push_back(std::move(value)); + value = torch::Tensor(); + index++; + } +} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/sparse.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/sparse.h new file mode 100644 index 0000000000000000000000000000000000000000..a30e74477e3658ef411fe14d2715cde323ce618b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/sparse.h @@ -0,0 +1,7 @@ +#pragma once + +#include + +namespace torch { +namespace sparse {} +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/special.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/special.h new file mode 100644 index 0000000000000000000000000000000000000000..12e3439130af5f3fb301aa44077ff7e88a416bc4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/special.h @@ -0,0 +1,1405 @@ +#pragma once + +#include +#include + +namespace torch { +namespace special { + +/// Computes the natural logarithm of the absolute value of the gamma function +/// See https://pytorch.org/docs/master/special.html#torch.special.gammaln. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::gammaln(t); +/// ``` +inline Tensor gammaln(const Tensor& self) { + return torch::special_gammaln(self); +} + +inline Tensor& gammaln_out(Tensor& result, const Tensor& self) { + return torch::special_gammaln_out(result, self); +} + +/// Computes the regularized lower incomplete gamma function +/// See https://pytorch.org/docs/master/special.html#torch.special.gammainc. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// auto s = torch::randn(128, dtype=kDouble); +/// torch::special::gammainc(s, t); +/// ``` +inline Tensor gammainc(const Tensor& self, const Tensor& other) { + return torch::special_gammainc(self, other); +} + +inline Tensor& gammainc_out( + Tensor& result, + const Tensor& self, + const Tensor& other) { + return torch::special_gammainc_out(result, self, other); +} + +/// Computes the regularized upper incomplete gamma function +/// See https://pytorch.org/docs/master/special.html#torch.special.gammainc. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// auto s = torch::randn(128, dtype=kDouble); +/// torch::special::gammaincc(s, t); +/// ``` +inline Tensor gammaincc(const Tensor& self, const Tensor& other) { + return torch::special_gammaincc(self, other); +} + +inline Tensor& gammaincc_out( + Tensor& result, + const Tensor& self, + const Tensor& other) { + return torch::special_gammaincc_out(result, self, other); +} + +/// Computes the multivariate log-gamma function with dimension `p`, elementwise +/// See https://pytorch.org/docs/master/special.html#torch.special.multigammaln. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::multigammaln(t, 1); +/// ``` +inline Tensor multigammaln(const Tensor& self, int64_t p) { + return torch::special_multigammaln(self, p); +} + +inline Tensor& multigammaln_out(Tensor& result, const Tensor& self, int64_t p) { + return torch::special_multigammaln_out(result, self, p); +} + +/// Computes the nth derivative of the digamma function on the input. +/// See https:://pytorch.org/docs/master/special.html#torch.special.polygamma. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::polygamma(2, t); +/// ``` +inline Tensor polygamma(int64_t n, const Tensor& self) { + return torch::special_polygamma(n, self); +} + +inline Tensor& polygamma_out(Tensor& result, int64_t n, const Tensor& self) { + return torch::special_polygamma_out(result, n, self); +} + +/// Computes the logarithmic derivative of the gamma function on input +/// See https://pytorch.org/docs/master/special.html#torch.special.psi +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::psi(t); +/// ``` +inline Tensor psi(const Tensor& self) { + return torch::special_psi(self); +} + +inline Tensor& psi_out(Tensor& result, const Tensor& self) { + return torch::special_psi_out(result, self); +} + +/// Computes the logarithmic derivative of the gamma function on input +/// See https://pytorch.org/docs/master/special.html#torch.special.digamma +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::digamma(t); +/// ``` +inline Tensor digamma(const Tensor& self) { + return torch::special_digamma(self); +} + +inline Tensor& digamma_out(Tensor& result, const Tensor& self) { + return torch::special_digamma_out(result, self); +} + +/// Computes entropy of input, elementwise +/// See https://pytorch.org/docs/master/special.html#torch.special.entr. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::entr(t); +/// ``` +inline Tensor entr(const Tensor& self) { + return torch::special_entr(self); +} + +inline Tensor& entr_out(Tensor& result, const Tensor& self) { + return torch::special_entr_out(result, self); +} + +/// Computes the error function +/// See https://pytorch.org/docs/master/special.html#torch.special.erf. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::erf(t); +/// ``` +inline Tensor erf(const Tensor& self) { + return torch::special_erf(self); +} + +inline Tensor& erf_out(Tensor& result, const Tensor& self) { + return torch::special_erf_out(result, self); +} + +/// Computes the complementary error function +/// See https://pytorch.org/docs/master/special.html#torch.special.erfc. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::erfc(t); +/// ``` +inline Tensor erfc(const Tensor& self) { + return torch::special_erfc(self); +} + +inline Tensor& erfc_out(Tensor& result, const Tensor& self) { + return torch::special_erfc_out(result, self); +} + +/// Computes the scaled complementary error function +/// See https://pytorch.org/docs/master/special.html#torch.special.erfcx. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::erfcx(t); +/// ``` +inline Tensor erfcx(const Tensor& self) { + return torch::special_erfcx(self); +} + +inline Tensor& erfcx_out(Tensor& result, const Tensor& self) { + return torch::special_erfcx_out(result, self); +} + +/// Computes the inverse error function +/// See https://pytorch.org/docs/master/special.html#torch.special.erfinv. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::erfinv(t); +/// ``` +inline Tensor erfinv(const Tensor& self) { + return torch::special_erfinv(self); +} + +inline Tensor& erfinv_out(Tensor& result, const Tensor& self) { + return torch::special_erfinv_out(result, self); +} + +/// Computes the log of summed exponentials of each row of input in the given +/// dimension dim See +/// https://pytorch.org/docs/master/special.html#torch.special.logsumexp. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn(3, 3); +/// torch::special::logsumexp(t, 1); +/// ``` +inline Tensor logsumexp(const Tensor& self, IntArrayRef dims, bool keepdim) { + return torch::special_logsumexp(self, dims, keepdim); +} + +inline Tensor& logsumexp_out( + Tensor& result, + const Tensor& self, + IntArrayRef dims, + bool keepdim) { + return torch::special_logsumexp_out(result, self, dims, keepdim); +} + +/// Computes the argument, x, for which the area under the Gaussian probability +/// density function (integrated from minus infinity to x) is equal to input, +/// elementwise. See +/// https://pytorch.org/docs/master/special.html#torch.special.ndtri +/// +/// Example: +/// ``` +/// auto t = torch::rand(128, dtype=kDouble); +/// torch::special::ndtri(t); +/// ``` +inline Tensor ndtri(const Tensor& self) { + return torch::special_ndtri(self); +} + +inline Tensor& ndtri_out(Tensor& result, const Tensor& self) { + return torch::special_ndtri_out(result, self); +} + +/// Computes the log of area under the standard Gaussian probability density +/// function, integrated from minus infinity to :attr:`input`, elementwise See +/// https://pytorch.org/docs/master/special.html#torch.special.log_ndtr +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::log_ndtr(t); +/// ``` +inline Tensor log_ndtr(const Tensor& self) { + return torch::special_log_ndtr(self); +} + +inline Tensor& log_ndtr_out(Tensor& result, const Tensor& self) { + return torch::special_log_ndtr_out(result, self); +} + +/// Computes the logit of input, elementwise. +/// See https://pytorch.org/docs/master/special.html#torch.special.logit. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::logit(t); +/// ``` +inline Tensor logit(const Tensor& self) { + return torch::special_logit(self); +} + +inline Tensor& logit_out(Tensor& result, const Tensor& self) { + return torch::special_logit_out(result, self); +} + +/// Computes the expit (also known as the logistic sigmoid function) of input, +/// elementwise See +/// https://pytorch.org/docs/master/special.html#torch.special.expit. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::expit(t); +/// ``` +inline Tensor expit(const Tensor& self) { + return torch::special_expit(self); +} + +inline Tensor& expit_out(Tensor& result, const Tensor& self) { + return torch::special_expit_out(result, self); +} + +/// Computes the base two exponential function of :attr:`input`, elementwise +/// See https://pytorch.org/docs/master/special.html#torch.special.exp2. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::exp2(t); +/// ``` +inline Tensor exp2(const Tensor& self) { + return torch::special_exp2(self); +} + +inline Tensor& exp2_out(Tensor& result, const Tensor& self) { + return torch::special_exp2_out(result, self); +} + +/// Computes the exponential of the elements minus 1, elementwise +/// See https://pytorch.org/docs/master/special.html#torch.special.expm1. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kDouble); +/// torch::special::expm1(t); +/// ``` +inline Tensor expm1(const Tensor& self) { + return torch::special_expm1(self); +} + +inline Tensor& expm1_out(Tensor& result, const Tensor& self) { + return torch::special_expm1_out(result, self); +} + +/// Computes x * log(y) for inputs, elementwise +/// See https://pytorch.org/docs/master/special.html#torch.special.xlogy. 
+///
+/// Example:
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto y = torch::randn(128, torch::kDouble);
+/// torch::special::xlogy(x, y);
+/// ```
+inline Tensor xlogy(const Tensor& self, const Tensor& other) {
+  return torch::special_xlogy(self, other);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor xlogy(const Scalar& self, const Tensor& other) {
+  return torch::special_xlogy(self, other);
+}
+
+inline Tensor xlogy(const Tensor& self, const Scalar& other) {
+  return torch::special_xlogy(self, other);
+}
+
+/// Variants of torch::special::xlogy that write into `result` and return it.
+inline Tensor& xlogy_out(
+    Tensor& result,
+    const Tensor& self,
+    const Tensor& other) {
+  return torch::special_xlogy_out(result, self, other);
+}
+
+inline Tensor& xlogy_out(
+    Tensor& result,
+    const Scalar& self,
+    const Tensor& other) {
+  return torch::special_xlogy_out(result, self, other);
+}
+
+inline Tensor& xlogy_out(
+    Tensor& result,
+    const Tensor& self,
+    const Scalar& other) {
+  return torch::special_xlogy_out(result, self, other);
+}
+
+/// Computes x * log1p(y) for inputs, elementwise
+/// See https://pytorch.org/docs/master/special.html#torch.special.xlog1py.
+///
+/// Example:
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto y = torch::randn(128, torch::kDouble);
+/// torch::special::xlog1py(x, y);
+/// ```
+inline Tensor xlog1py(const Tensor& self, const Tensor& other) {
+  return torch::special_xlog1py(self, other);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor xlog1py(const Scalar& self, const Tensor& other) {
+  return torch::special_xlog1py(self, other);
+}
+
+inline Tensor xlog1py(const Tensor& self, const Scalar& other) {
+  return torch::special_xlog1py(self, other);
+}
+
+/// Variants of torch::special::xlog1py that write into `result` and return it.
+inline Tensor& xlog1py_out(
+    Tensor& result,
+    const Tensor& self,
+    const Tensor& other) {
+  return torch::special_xlog1py_out(result, self, other);
+}
+
+inline Tensor& xlog1py_out(
+    Tensor& result,
+    const Scalar& self,
+    const Tensor& other) {
+  return torch::special_xlog1py_out(result, self, other);
+}
+
+inline Tensor& xlog1py_out(
+    Tensor& result,
+    const Tensor& self,
+    const Scalar& other) {
+  return torch::special_xlog1py_out(result, self, other);
+}
+
+/// Computes Hurwitz Zeta function for inputs, elementwise
+/// See https://pytorch.org/docs/master/special.html#torch.special.zeta.
+///
+/// Example:
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto y = torch::randn(128, torch::kDouble);
+/// torch::special::zeta(x, y);
+/// ```
+inline Tensor zeta(const Tensor& self, const Tensor& other) {
+  return torch::special_zeta(self, other);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor zeta(const Scalar& self, const Tensor& other) {
+  return torch::special_zeta(self, other);
+}
+
+inline Tensor zeta(const Tensor& self, const Scalar& other) {
+  return torch::special_zeta(self, other);
+}
+
+/// Variants of torch::special::zeta that write into `result` and return it.
+inline Tensor& zeta_out(
+    Tensor& result,
+    const Tensor& self,
+    const Tensor& other) {
+  return torch::special_zeta_out(result, self, other);
+}
+
+inline Tensor& zeta_out(
+    Tensor& result,
+    const Scalar& self,
+    const Tensor& other) {
+  return torch::special_zeta_out(result, self, other);
+}
+
+inline Tensor& zeta_out(
+    Tensor& result,
+    const Tensor& self,
+    const Scalar& other) {
+  return torch::special_zeta_out(result, self, other);
+}
+
+/// Computes the zeroth order modified Bessel function of the first kind of
+/// input, elementwise See
+/// https://pytorch.org/docs/master/special.html#torch.special.i0
+///
+/// Example:
+/// ```
+/// auto t = torch::randn(128, torch::kDouble);
+/// torch::special::i0(t);
+/// ```
+inline Tensor i0(const Tensor& self) {
+  return torch::special_i0(self);
+}
+
+/// Variant of torch::special::i0 that writes into `result` and returns it.
+inline Tensor& i0_out(Tensor& result, const Tensor& self) {
+  return torch::special_i0_out(result, self);
+}
+
+/// Computes the area under the standard Gaussian probability density function,
+/// integrated from minus infinity to :attr:`input`, elementwise
+/// See https://pytorch.org/docs/master/special.html#torch.special.ndtr
+///
+/// Example:
+/// ```
+/// auto t = torch::randn(128, torch::kDouble);
+/// torch::special::ndtr(t);
+/// ```
+inline Tensor ndtr(const Tensor& self) {
+  return torch::special_ndtr(self);
+}
+
+/// Variant of torch::special::ndtr that writes into `result` and returns it.
+inline Tensor& ndtr_out(Tensor& result, const Tensor& self) {
+  return torch::special_ndtr_out(result, self);
+}
+
+/// Computes the exponentially scaled zeroth order modified Bessel function of
+/// the first kind See
+/// https://pytorch.org/docs/master/special.html#torch.special.i0e.
+///
+/// Example:
+/// ```
+/// auto t = torch::randn(128, torch::kDouble);
+/// torch::special::i0e(t);
+/// ```
+inline Tensor i0e(const Tensor& self) {
+  return torch::special_i0e(self);
+}
+
+/// Variant of torch::special::i0e that writes into `result` and returns it.
+inline Tensor& i0e_out(Tensor& result, const Tensor& self) {
+  return torch::special_i0e_out(result, self);
+}
+
+/// Computes the first order modified Bessel function of the first kind
+/// See https://pytorch.org/docs/master/special.html#torch.special.i1.
+///
+/// Example:
+/// ```
+/// auto t = torch::randn(128, torch::kDouble);
+/// torch::special::i1(t);
+/// ```
+inline Tensor i1(const Tensor& self) {
+  return torch::special_i1(self);
+}
+
+/// Variant of torch::special::i1 that writes into `result` and returns it.
+inline Tensor& i1_out(Tensor& result, const Tensor& self) {
+  return torch::special_i1_out(result, self);
+}
+
+/// Computes the exponentially scaled first order modified Bessel function of
+/// the first kind See
+/// https://pytorch.org/docs/master/special.html#torch.special.i1e.
+///
+/// Example:
+/// ```
+/// auto t = torch::randn(128, torch::kDouble);
+/// torch::special::i1e(t);
+/// ```
+inline Tensor i1e(const Tensor& self) {
+  return torch::special_i1e(self);
+}
+
+/// Variant of torch::special::i1e that writes into `result` and returns it.
+inline Tensor& i1e_out(Tensor& result, const Tensor& self) {
+  return torch::special_i1e_out(result, self);
+}
+
+/// Computes the sinc of input, elementwise
+/// See https://pytorch.org/docs/master/special.html#torch.special.sinc.
+///
+/// Example:
+/// ```
+/// auto t = torch::randn(128, torch::kDouble);
+/// torch::special::sinc(t);
+/// ```
+inline Tensor sinc(const Tensor& self) {
+  return torch::special_sinc(self);
+}
+
+/// Variant of torch::special::sinc that writes into `result` and returns it.
+inline Tensor& sinc_out(Tensor& result, const Tensor& self) {
+  return torch::special_sinc_out(result, self);
+}
+
+/// Rounds the elements of the input
+/// See https://pytorch.org/docs/master/special.html#torch.special.round.
+///
+/// Example:
+/// ```
+/// auto t = torch::randn(128, torch::kDouble);
+/// torch::special::round(t);
+/// ```
+inline Tensor round(const Tensor& self) {
+  return torch::special_round(self);
+}
+
+/// Variant of torch::special::round that writes into `result` and returns it.
+inline Tensor& round_out(Tensor& result, const Tensor& self) {
+  return torch::special_round_out(result, self);
+}
+
+/// Computes log(1 + x) of the input, elementwise
+/// See https://pytorch.org/docs/master/special.html#torch.special.log1p.
+///
+/// Example:
+/// ```
+/// auto t = torch::randn(128, torch::kDouble);
+/// torch::special::log1p(t);
+/// ```
+inline Tensor log1p(const Tensor& self) {
+  return torch::special_log1p(self);
+}
+
+/// Variant of torch::special::log1p that writes into `result` and returns it.
+inline Tensor& log1p_out(Tensor& result, const Tensor& self) {
+  return torch::special_log1p_out(result, self);
+}
+
+/// Computes log followed by softmax(x) of the input
+/// See https://pytorch.org/docs/master/special.html#torch.special.log_softmax.
+///
+/// Example:
+/// ```
+/// auto t = torch::randn({128, 128}, torch::kDouble);
+/// torch::special::log_softmax(t, 0);
+/// ```
+inline Tensor log_softmax(
+    const Tensor& self,
+    int64_t dim,
+    // `dtype`, if given, casts the input before the computation is performed.
+    c10::optional<ScalarType> dtype) {
+  return torch::special_log_softmax(self, dim, dtype);
+}
+
+/// Computes softmax of the input along a given dimension
+/// See https://pytorch.org/docs/master/special.html#torch.special.softmax.
+///
+/// Example:
+/// ```
+/// auto t = torch::randn({128, 128}, torch::kDouble);
+/// torch::special::softmax(t, 0);
+/// ```
+inline Tensor softmax(
+    const Tensor& self,
+    int64_t dim,
+    c10::optional<ScalarType> dtype) {
+  return torch::special_softmax(self, dim, dtype);
+}
+
+/// Airy function Ai.
+///
+/// See https://pytorch.org/docs/master/special.html#torch.special.airy_ai.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::airy_ai(x);
+/// ```
+inline Tensor airy_ai(const Tensor& x) {
+  return torch::special_airy_ai(x);
+}
+
+/// Variant of torch::special::airy_ai that writes into `y` and returns it.
+inline Tensor& airy_ai_out(Tensor& y, const Tensor& x) {
+  return torch::special_airy_ai_out(y, x);
+}
+
+/// Bessel function of the first kind of order 0.
+///
+/// See https://pytorch.org/docs/master/special.html#torch.special.bessel_j0.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::bessel_j0(x);
+/// ```
+inline Tensor bessel_j0(const Tensor& self) {
+  return torch::special_bessel_j0(self);
+}
+
+/// Variant of torch::special::bessel_j0 that writes into `result`.
+inline Tensor& bessel_j0_out(Tensor& result, const Tensor& self) {
+  return torch::special_bessel_j0_out(result, self);
+}
+
+/// Bessel function of the first kind of order 1.
+///
+/// See https://pytorch.org/docs/master/special.html#torch.special.bessel_j1.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::bessel_j1(x);
+/// ```
+inline Tensor bessel_j1(const Tensor& self) {
+  return torch::special_bessel_j1(self);
+}
+
+/// Variant of torch::special::bessel_j1 that writes into `result`.
+inline Tensor& bessel_j1_out(Tensor& result, const Tensor& self) {
+  return torch::special_bessel_j1_out(result, self);
+}
+
+/// Bessel function of the second kind of order 0.
+///
+/// See https://pytorch.org/docs/master/special.html#torch.special.bessel_y0.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::bessel_y0(x);
+/// ```
+inline Tensor bessel_y0(const Tensor& self) {
+  return torch::special_bessel_y0(self);
+}
+
+/// Variant of torch::special::bessel_y0 that writes into `result`.
+inline Tensor& bessel_y0_out(Tensor& result, const Tensor& self) {
+  return torch::special_bessel_y0_out(result, self);
+}
+
+/// Bessel function of the second kind of order 1.
+///
+/// See https://pytorch.org/docs/master/special.html#torch.special.bessel_y1.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::bessel_y1(x);
+/// ```
+inline Tensor bessel_y1(const Tensor& self) {
+  return torch::special_bessel_y1(self);
+}
+
+/// Variant of torch::special::bessel_y1 that writes into `result`.
+inline Tensor& bessel_y1_out(Tensor& result, const Tensor& self) {
+  return torch::special_bessel_y1_out(result, self);
+}
+
+/// Chebyshev polynomial of the first kind.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_t.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::chebyshev_polynomial_t(x, n);
+/// ```
+inline Tensor chebyshev_polynomial_t(const Tensor& x, const Tensor& n) {
+  return torch::special_chebyshev_polynomial_t(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor chebyshev_polynomial_t(const Scalar& x, const Tensor& n) {
+  return torch::special_chebyshev_polynomial_t(x, n);
+}
+
+inline Tensor chebyshev_polynomial_t(const Tensor& x, const Scalar& n) {
+  return torch::special_chebyshev_polynomial_t(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& chebyshev_polynomial_t_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_chebyshev_polynomial_t_out(output, x, n);
+}
+
+inline Tensor& chebyshev_polynomial_t_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_chebyshev_polynomial_t_out(output, x, n);
+}
+
+inline Tensor& chebyshev_polynomial_t_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_chebyshev_polynomial_t_out(output, x, n);
+}
+
+/// Chebyshev polynomial of the second kind.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_u.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::chebyshev_polynomial_u(x, n);
+/// ```
+inline Tensor chebyshev_polynomial_u(const Tensor& x, const Tensor& n) {
+  return torch::special_chebyshev_polynomial_u(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor chebyshev_polynomial_u(const Scalar& x, const Tensor& n) {
+  return torch::special_chebyshev_polynomial_u(x, n);
+}
+
+inline Tensor chebyshev_polynomial_u(const Tensor& x, const Scalar& n) {
+  return torch::special_chebyshev_polynomial_u(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& chebyshev_polynomial_u_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_chebyshev_polynomial_u_out(output, x, n);
+}
+
+inline Tensor& chebyshev_polynomial_u_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_chebyshev_polynomial_u_out(output, x, n);
+}
+
+inline Tensor& chebyshev_polynomial_u_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_chebyshev_polynomial_u_out(output, x, n);
+}
+
+/// Chebyshev polynomial of the third kind.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_v.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::chebyshev_polynomial_v(x, n);
+/// ```
+inline Tensor chebyshev_polynomial_v(const Tensor& x, const Tensor& n) {
+  return torch::special_chebyshev_polynomial_v(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor chebyshev_polynomial_v(const Scalar& x, const Tensor& n) {
+  return torch::special_chebyshev_polynomial_v(x, n);
+}
+
+inline Tensor chebyshev_polynomial_v(const Tensor& x, const Scalar& n) {
+  return torch::special_chebyshev_polynomial_v(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& chebyshev_polynomial_v_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_chebyshev_polynomial_v_out(output, x, n);
+}
+
+inline Tensor& chebyshev_polynomial_v_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_chebyshev_polynomial_v_out(output, x, n);
+}
+
+inline Tensor& chebyshev_polynomial_v_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_chebyshev_polynomial_v_out(output, x, n);
+}
+
+/// Chebyshev polynomial of the fourth kind.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_w.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::chebyshev_polynomial_w(x, n);
+/// ```
+inline Tensor chebyshev_polynomial_w(const Tensor& x, const Tensor& n) {
+  return torch::special_chebyshev_polynomial_w(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor chebyshev_polynomial_w(const Scalar& x, const Tensor& n) {
+  return torch::special_chebyshev_polynomial_w(x, n);
+}
+
+inline Tensor chebyshev_polynomial_w(const Tensor& x, const Scalar& n) {
+  return torch::special_chebyshev_polynomial_w(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& chebyshev_polynomial_w_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_chebyshev_polynomial_w_out(output, x, n);
+}
+
+inline Tensor& chebyshev_polynomial_w_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_chebyshev_polynomial_w_out(output, x, n);
+}
+
+inline Tensor& chebyshev_polynomial_w_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_chebyshev_polynomial_w_out(output, x, n);
+}
+
+/// Physicist’s Hermite polynomial.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.hermite_polynomial_h.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::hermite_polynomial_h(x, n);
+/// ```
+inline Tensor hermite_polynomial_h(const Tensor& x, const Tensor& n) {
+  return torch::special_hermite_polynomial_h(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor hermite_polynomial_h(const Scalar& x, const Tensor& n) {
+  return torch::special_hermite_polynomial_h(x, n);
+}
+
+inline Tensor hermite_polynomial_h(const Tensor& x, const Scalar& n) {
+  return torch::special_hermite_polynomial_h(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& hermite_polynomial_h_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_hermite_polynomial_h_out(output, x, n);
+}
+
+inline Tensor& hermite_polynomial_h_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_hermite_polynomial_h_out(output, x, n);
+}
+
+inline Tensor& hermite_polynomial_h_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_hermite_polynomial_h_out(output, x, n);
+}
+
+/// Probabilist’s Hermite polynomial.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.hermite_polynomial_he.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::hermite_polynomial_he(x, n);
+/// ```
+inline Tensor hermite_polynomial_he(const Tensor& x, const Tensor& n) {
+  return torch::special_hermite_polynomial_he(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor hermite_polynomial_he(const Scalar& x, const Tensor& n) {
+  return torch::special_hermite_polynomial_he(x, n);
+}
+
+inline Tensor hermite_polynomial_he(const Tensor& x, const Scalar& n) {
+  return torch::special_hermite_polynomial_he(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& hermite_polynomial_he_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_hermite_polynomial_he_out(output, x, n);
+}
+
+inline Tensor& hermite_polynomial_he_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_hermite_polynomial_he_out(output, x, n);
+}
+
+inline Tensor& hermite_polynomial_he_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_hermite_polynomial_he_out(output, x, n);
+}
+
+/// Laguerre polynomial.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.laguerre_polynomial_l.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::laguerre_polynomial_l(x, n);
+/// ```
+inline Tensor laguerre_polynomial_l(const Tensor& x, const Tensor& n) {
+  return torch::special_laguerre_polynomial_l(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor laguerre_polynomial_l(const Scalar& x, const Tensor& n) {
+  return torch::special_laguerre_polynomial_l(x, n);
+}
+
+inline Tensor laguerre_polynomial_l(const Tensor& x, const Scalar& n) {
+  return torch::special_laguerre_polynomial_l(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& laguerre_polynomial_l_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_laguerre_polynomial_l_out(output, x, n);
+}
+
+inline Tensor& laguerre_polynomial_l_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_laguerre_polynomial_l_out(output, x, n);
+}
+
+inline Tensor& laguerre_polynomial_l_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_laguerre_polynomial_l_out(output, x, n);
+}
+
+/// Legendre polynomial.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.legendre_polynomial_p.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::legendre_polynomial_p(x, n);
+/// ```
+inline Tensor legendre_polynomial_p(const Tensor& x, const Tensor& n) {
+  return torch::special_legendre_polynomial_p(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor legendre_polynomial_p(const Scalar& x, const Tensor& n) {
+  return torch::special_legendre_polynomial_p(x, n);
+}
+
+inline Tensor legendre_polynomial_p(const Tensor& x, const Scalar& n) {
+  return torch::special_legendre_polynomial_p(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& legendre_polynomial_p_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_legendre_polynomial_p_out(output, x, n);
+}
+
+inline Tensor& legendre_polynomial_p_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_legendre_polynomial_p_out(output, x, n);
+}
+
+inline Tensor& legendre_polynomial_p_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_legendre_polynomial_p_out(output, x, n);
+}
+
+/// Modified Bessel function of the first kind of order 0.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_i0.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::modified_bessel_i0(x);
+/// ```
+inline Tensor modified_bessel_i0(const Tensor& self) {
+  return torch::special_modified_bessel_i0(self);
+}
+
+/// Variant of torch::special::modified_bessel_i0 that writes into `result`.
+inline Tensor& modified_bessel_i0_out(Tensor& result, const Tensor& self) {
+  return torch::special_modified_bessel_i0_out(result, self);
+}
+
+/// Modified Bessel function of the first kind of order 1.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_i1.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::modified_bessel_i1(x);
+/// ```
+inline Tensor modified_bessel_i1(const Tensor& self) {
+  return torch::special_modified_bessel_i1(self);
+}
+
+/// Variant of torch::special::modified_bessel_i1 that writes into `result`.
+inline Tensor& modified_bessel_i1_out(Tensor& result, const Tensor& self) {
+  return torch::special_modified_bessel_i1_out(result, self);
+}
+
+/// Modified Bessel function of the second kind of order 0.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_k0.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::modified_bessel_k0(x);
+/// ```
+inline Tensor modified_bessel_k0(const Tensor& self) {
+  return torch::special_modified_bessel_k0(self);
+}
+
+/// Variant of torch::special::modified_bessel_k0 that writes into `result`.
+inline Tensor& modified_bessel_k0_out(Tensor& result, const Tensor& self) {
+  return torch::special_modified_bessel_k0_out(result, self);
+}
+
+/// Modified Bessel function of the second kind of order 1.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_k1.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::modified_bessel_k1(x);
+/// ```
+inline Tensor modified_bessel_k1(const Tensor& self) {
+  return torch::special_modified_bessel_k1(self);
+}
+
+/// Variant of torch::special::modified_bessel_k1 that writes into `result`.
+inline Tensor& modified_bessel_k1_out(Tensor& result, const Tensor& self) {
+  return torch::special_modified_bessel_k1_out(result, self);
+}
+
+/// Scaled modified Bessel function of the second kind of order 0.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.scaled_modified_bessel_k0.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::scaled_modified_bessel_k0(x);
+/// ```
+inline Tensor scaled_modified_bessel_k0(const Tensor& x) {
+  return torch::special_scaled_modified_bessel_k0(x);
+}
+
+/// Variant of torch::special::scaled_modified_bessel_k0 that writes into `y`.
+inline Tensor& scaled_modified_bessel_k0_out(Tensor& y, const Tensor& x) {
+  return torch::special_scaled_modified_bessel_k0_out(y, x);
+}
+
+/// Scaled modified Bessel function of the second kind of order 1.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.scaled_modified_bessel_k1.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::scaled_modified_bessel_k1(x);
+/// ```
+inline Tensor scaled_modified_bessel_k1(const Tensor& x) {
+  return torch::special_scaled_modified_bessel_k1(x);
+}
+
+/// Variant of torch::special::scaled_modified_bessel_k1 that writes into `y`.
+inline Tensor& scaled_modified_bessel_k1_out(Tensor& y, const Tensor& x) {
+  return torch::special_scaled_modified_bessel_k1_out(y, x);
+}
+
+/// Shifted Chebyshev polynomial of the first kind.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_t.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::shifted_chebyshev_polynomial_t(x, n);
+/// ```
+inline Tensor shifted_chebyshev_polynomial_t(const Tensor& x, const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_t(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor shifted_chebyshev_polynomial_t(const Scalar& x, const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_t(x, n);
+}
+
+inline Tensor shifted_chebyshev_polynomial_t(const Tensor& x, const Scalar& n) {
+  return torch::special_shifted_chebyshev_polynomial_t(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& shifted_chebyshev_polynomial_t_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_t_out(output, x, n);
+}
+
+inline Tensor& shifted_chebyshev_polynomial_t_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_t_out(output, x, n);
+}
+
+inline Tensor& shifted_chebyshev_polynomial_t_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_shifted_chebyshev_polynomial_t_out(output, x, n);
+}
+
+/// Shifted Chebyshev polynomial of the second kind.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_u.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::shifted_chebyshev_polynomial_u(x, n);
+/// ```
+inline Tensor shifted_chebyshev_polynomial_u(const Tensor& x, const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_u(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor shifted_chebyshev_polynomial_u(const Scalar& x, const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_u(x, n);
+}
+
+inline Tensor shifted_chebyshev_polynomial_u(const Tensor& x, const Scalar& n) {
+  return torch::special_shifted_chebyshev_polynomial_u(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& shifted_chebyshev_polynomial_u_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_u_out(output, x, n);
+}
+
+inline Tensor& shifted_chebyshev_polynomial_u_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_u_out(output, x, n);
+}
+
+inline Tensor& shifted_chebyshev_polynomial_u_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_shifted_chebyshev_polynomial_u_out(output, x, n);
+}
+
+/// Shifted Chebyshev polynomial of the third kind.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_v.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::shifted_chebyshev_polynomial_v(x, n);
+/// ```
+inline Tensor shifted_chebyshev_polynomial_v(const Tensor& x, const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_v(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor shifted_chebyshev_polynomial_v(const Scalar& x, const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_v(x, n);
+}
+
+inline Tensor shifted_chebyshev_polynomial_v(const Tensor& x, const Scalar& n) {
+  return torch::special_shifted_chebyshev_polynomial_v(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& shifted_chebyshev_polynomial_v_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_v_out(output, x, n);
+}
+
+inline Tensor& shifted_chebyshev_polynomial_v_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_v_out(output, x, n);
+}
+
+inline Tensor& shifted_chebyshev_polynomial_v_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_shifted_chebyshev_polynomial_v_out(output, x, n);
+}
+
+/// Shifted Chebyshev polynomial of the fourth kind.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_w.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+/// auto n = torch::randn(128, torch::kDouble);
+///
+/// torch::special::shifted_chebyshev_polynomial_w(x, n);
+/// ```
+inline Tensor shifted_chebyshev_polynomial_w(const Tensor& x, const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_w(x, n);
+}
+
+/// Overloads accepting a Scalar in place of either argument.
+inline Tensor shifted_chebyshev_polynomial_w(const Scalar& x, const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_w(x, n);
+}
+
+inline Tensor shifted_chebyshev_polynomial_w(const Tensor& x, const Scalar& n) {
+  return torch::special_shifted_chebyshev_polynomial_w(x, n);
+}
+
+/// Variants that write into `output` and return it.
+inline Tensor& shifted_chebyshev_polynomial_w_out(
+    Tensor& output,
+    const Tensor& x,
+    const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_w_out(output, x, n);
+}
+
+inline Tensor& shifted_chebyshev_polynomial_w_out(
+    Tensor& output,
+    const Scalar& x,
+    const Tensor& n) {
+  return torch::special_shifted_chebyshev_polynomial_w_out(output, x, n);
+}
+
+inline Tensor& shifted_chebyshev_polynomial_w_out(
+    Tensor& output,
+    const Tensor& x,
+    const Scalar& n) {
+  return torch::special_shifted_chebyshev_polynomial_w_out(output, x, n);
+}
+
+/// Spherical Bessel function of the first kind of order 0.
+///
+/// See
+/// https://pytorch.org/docs/master/special.html#torch.special.spherical_bessel_j0.
+///
+/// Example:
+///
+/// ```
+/// auto x = torch::randn(128, torch::kDouble);
+///
+/// torch::special::spherical_bessel_j0(x);
+/// ```
+inline Tensor spherical_bessel_j0(const Tensor& x) {
+  return torch::special_spherical_bessel_j0(x);
+}
+
+/// Variant of torch::special::spherical_bessel_j0 that writes into `y`.
+inline Tensor& spherical_bessel_j0_out(Tensor& y, const Tensor& x) {
+  return torch::special_spherical_bessel_j0_out(y, x);
+}
+} // namespace special
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/torch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/torch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7316af88d2eba7337086b29d099370bf30aa99dc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/torch.h
@@ -0,0 +1,8 @@
+#pragma once
+
+#include
+
+#ifdef TORCH_API_INCLUDE_EXTENSION_H
+#include
+
+#endif // defined(TORCH_API_INCLUDE_EXTENSION_H)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/types.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/types.h
new file mode 100644
index 0000000000000000000000000000000000000000..92be710cf4bf464bb26a0fba519b8cbb7eb3b37b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/types.h
@@ -0,0 +1,65 @@
+#pragma once
+
+#include
+
+#include
+
+#include
+#include
+
+// TODO: These don't really belong here but torchvision builds in CI need them
+// Remove once the torchvision version being compiled in CI is updated
+#include
+#include
+
+namespace torch {
+
+// NOTE [ Exposing declarations in `at::` to `torch::` ]
+//
+// The following line `using namespace at;` is responsible for exposing all
+// declarations in `at::` namespace to `torch::` namespace.
+//
+// According to the rules laid out in
+// https://en.cppreference.com/w/cpp/language/qualified_lookup, section
+// "Namespace members":
+// ```
+// Qualified lookup within the scope of a namespace N first considers all
+// declarations that are located in N and all declarations that are located in
+// the inline namespace members of N (and, transitively, in their inline
+// namespace members). If there are no declarations in that set then it
+// considers declarations in all namespaces named by using-directives found in N
+// and in all transitive inline namespace members of N.
+// ```
+//
+// This means that if both `at::` and `torch::` namespaces have a function with
+// the same signature (e.g. both `at::func()` and `torch::func()` exist), after
+// `namespace torch { using namespace at; }`, when we call `torch::func()`, the
+// `func()` function defined in `torch::` namespace will always be called, and
+// the `func()` function defined in `at::` namespace is always hidden.
+using namespace at; // NOLINT
+
+using c10::nullopt;
+using c10::optional;
+
+/// `torch::Dtype` is an alias for ATen's scalar-type enum.
+using Dtype = at::ScalarType;
+
+/// Fixed width dtypes, aliasing the corresponding ATen scalar types.
+constexpr auto kUInt8 = at::kByte;
+constexpr auto kInt8 = at::kChar;
+constexpr auto kInt16 = at::kShort;
+constexpr auto kInt32 = at::kInt;
+constexpr auto kInt64 = at::kLong;
+constexpr auto kFloat16 = at::kHalf;
+constexpr auto kFloat32 = at::kFloat;
+constexpr auto kFloat64 = at::kDouble;
+
+/// Rust-style short dtypes.
+constexpr auto kU8 = kUInt8; +constexpr auto kI8 = kInt8; +constexpr auto kI16 = kInt16; +constexpr auto kI32 = kInt32; +constexpr auto kI64 = kInt64; +constexpr auto kF16 = kFloat16; +constexpr auto kF32 = kFloat32; +constexpr auto kF64 = kFloat64; +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..004a0064636ef4b83899c769107fa844002d534f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/utils.h @@ -0,0 +1,116 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { + +/// A RAII, thread-local guard that disabled gradient calculation. +/// +/// Disabling gradient calculation is useful for inference, when you are sure +/// that you will not call `at::Tensor::backward`. It will reduce memory +/// consumption for computations that would otherwise have `requires_grad() == +/// true`. +/// +/// In this mode, the result of every computation will have +/// `requires_grad() == false`, even when the inputs have `requires_grad() == +/// true`. +/// +/// This context manager is thread-local; it will not affect computation +/// in other threads. +/// +/// Example: +/// @code +/// auto x = torch::tensor({1.}, torch::requires_grad()); +/// { +/// torch::NoGradGuard no_grad; +/// auto y = x * 2; +/// std::cout << y.requires_grad() << std::endl; // prints `false` +/// } +/// { +/// auto doubler = [](torch::Tensor x) { +/// torch::NoGradGuard no_grad; +/// return x * 2; +/// }; +/// auto z = doubler(x); +/// std::cout << z.requires_grad() << std::endl; // prints `false` +/// } +/// @endcode +using NoGradGuard = at::NoGradGuard; + +/// A RAII, thread-local guard that sets gradient calculation to on or off. 
+/// +/// ``AutoGradMode`` will enable or disable grads based on its argument +/// `enabled`. +/// +/// This context manager is thread-local; it will not affect computation +/// in other threads. +/// +/// \param enabled: Flag whether to enable grad (``true``), or disable +/// (``false``). This can be used to conditionally enable +/// gradients. +/// +/// Example: +/// @code +/// auto x = torch::tensor({1.}, torch::requires_grad()); +/// { +/// torch::AutoGradMode enable_grad(true); +/// auto y = x * 2; +/// std::cout << y.requires_grad() << std::endl; // prints `true` +/// } +/// { +/// torch::AutoGradMode enable_grad(false); +/// auto y = x * 2; +/// std::cout << y.requires_grad() << std::endl; // prints `false` +/// } +/// @endcode +using AutoGradMode = at::AutoGradMode; + +/// Sets the global random seed for all newly created CPU and CUDA tensors. +using at::manual_seed; + +// Called during new thread initialization +using at::init_num_threads; + +// Returns the number of threads used in parallel region. +using at::get_num_threads; + +// Sets the number of threads to be used in parallel region. +using at::set_num_threads; + +// Returns the number of threads used for inter-op parallelism. +using at::get_num_interop_threads; + +// Sets the number of threads to be used for inter-op parallelism. 
+using at::set_num_interop_threads; + +// Returns true if both t1, t2 are undefined or both are defined and equal +inline bool equal_if_defined(Tensor t1, Tensor t2) { + return ( + (!t1.defined() && !t2.defined()) || + (t1.defined() && t2.defined() && torch::equal(t1, t2))); +} + +// RecordFunction API +using at::addGlobalCallback; +using at::addThreadLocalCallback; +using at::CallbackHandle; +using at::clearCallbacks; +using at::clearGlobalCallbacks; +using at::clearThreadLocalCallbacks; +using at::DisableRecordFunctionGuard; +using at::enableRecordFunction; +using at::hasCallbacks; +using at::hasGlobalCallbacks; +using at::hasThreadLocalCallbacks; +using at::isRecordFunctionEnabled; +using at::RecordFunction; +using at::RecordFunctionCallback; +using at::RecordFunctionGuard; +using at::removeCallback; + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/version.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/version.h new file mode 100644 index 0000000000000000000000000000000000000000..647e3bd94bf43f2d3743a4a97a23dba8a23185da --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/version.h @@ -0,0 +1,14 @@ +#pragma once + +/// Indicates the major version of LibTorch. +#define TORCH_VERSION_MAJOR 2 + +/// Indicates the minor version of LibTorch. +#define TORCH_VERSION_MINOR 2 + +/// Indicates the patch version of LibTorch. +#define TORCH_VERSION_PATCH 2 + +/// Indicates the version of LibTorch. 
+#define TORCH_VERSION \ + "2.2.2" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h new file mode 100644 index 0000000000000000000000000000000000000000..2fc0efb8cdc717b27c3adc31103fff3e5e86a783 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +using torch::jit::Operator; + +// A ScriptCall instance represents an invocation of a builtin operator for a +// TorchScript function. If it is a builtin operator, it +// contains a shared ptr to the `Operator` and a list of arguments. +// If it is a TorchScript function, it contains a non empty qualifiedName string +// to the TorchScript function schema name and a list of arguments. +class TORCH_API ScriptCall : public RpcCommandBase { + public: + // Constructor for builitin operator call. + ScriptCall(std::shared_ptr op, std::vector&& stack); + // Constructor for TorchScript function call. 
+ ScriptCall( + const c10::QualifiedName& qualifiedName, + std::vector&& stack, + const bool isAsyncExecution = false); + + bool hasOp() const; + std::shared_ptr op() const; + bool hasQualifiedName() const; + const c10::QualifiedName& qualifiedName() const; + // return the argument stack of this builtin operator + const std::vector& stack() const; + std::vector& stackRef(); + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + ~ScriptCall() override = default; + + protected: + virtual void toIValues(std::vector& ivalues) const; + static std::unique_ptr fromIValues( + std::vector& ivalues); + + private: + // Given an operator symbol and a string schema, return the matched operator. + static std::shared_ptr matchOperator(const std::string& str_schema); + + static const std::string BUILTIN_OP_NAMESPACE_; + static const std::string ATEN_PREFIX_; + + // This field has value if this ScriptCall represents invocation of a builtin + // operator. + c10::optional> op_; + // This field has non empty string if this ScriptCall represents invocation of + // an annotated torchscript function defined by users. 
+ c10::optional qualifiedName_; + std::vector stack_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h new file mode 100644 index 0000000000000000000000000000000000000000..9c4029bead95cefbdec8c522943f86d52d99fc11 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +using worker_id_t = int16_t; +using local_id_t = int64_t; + +bool getAllowJitRRefPickle(); +TORCH_API void enableJitRRefPickle(); +TORCH_API void disableJitRRefPickle(); + +struct TORCH_API JitRRefPickleGuard { + JitRRefPickleGuard(); + ~JitRRefPickleGuard(); +}; + +struct TORCH_API GloballyUniqueId final { + GloballyUniqueId(worker_id_t createdOn, local_id_t localId); + GloballyUniqueId(const GloballyUniqueId& other) = default; + GloballyUniqueId& operator=(const GloballyUniqueId& other) = delete; + + bool operator==(const GloballyUniqueId& other) const; + bool operator!=(const GloballyUniqueId& other) const; + + at::IValue toIValue() const; + static GloballyUniqueId fromIValue(const at::IValue&); + + struct Hash { + size_t operator()(const GloballyUniqueId& key) const { + return (uint64_t(key.createdOn_) << kLocalIdBits) | key.localId_; + } + }; + + static constexpr int kLocalIdBits = 48; + + const worker_id_t createdOn_; + const local_id_t localId_; +}; + +TORCH_API std::ostream& operator<<( + std::ostream& os, + const GloballyUniqueId& globalId); + +using RRefId = GloballyUniqueId; +using ForkId = GloballyUniqueId; +using ProfilingId = GloballyUniqueId; + +struct TORCH_API SerializedPyObj final { + SerializedPyObj(std::string&& payload, std::vector&& tensors) + : 
payload_(std::move(payload)), tensors_(std::move(tensors)) {} + + std::vector toIValues() &&; + static SerializedPyObj fromIValues(std::vector value); + + std::string payload_; + std::vector tensors_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/interface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/interface.h new file mode 100644 index 0000000000000000000000000000000000000000..dd6e853daa8a712dd0d5e726fff8c1da84e36b47 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/interface.h @@ -0,0 +1,131 @@ +#pragma once + +#include +#include + +// WARNING: Be careful when adding new includes here. This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. 
+#include + +#ifdef __GNUC__ +#define AOT_INDUCTOR_EXPORT __attribute__((__visibility__("default"))) +#else // !__GNUC__ +#ifdef _WIN32 +#define AOT_INDUCTOR_EXPORT __declspec(dllexport) +#else // !_WIN32 +#define AOT_INDUCTOR_EXPORT +#endif // _WIN32 +#endif // __GNUC__ + +using AOTIRuntimeError = int32_t; +#define AOTI_RUNTIME_SUCCESS 0 +#define AOTI_RUNTIME_FAILURE 1 + +#define AOTI_RUNTIME_ERROR_CODE_CHECK(call) \ + if ((call) != AOTI_RUNTIME_SUCCESS) { \ + throw std::runtime_error( \ + std::string(#call " API call failed at ") + __FILE__ + ", line " + \ + std::to_string(__LINE__)); \ + } + +extern "C" { +struct AOTInductorModelOpaque; +using AOTInductorModelHandle = AOTInductorModelOpaque*; + +struct AOTInductorModelContainerOpaque; +using AOTInductorModelContainerHandle = AOTInductorModelContainerOpaque*; + +struct AOTInductorStreamOpaque; +using AOTInductorStreamHandle = AOTInductorStreamOpaque*; + +struct AOTInductorConstantMap; +using AOTInductorConstantMapHandle = AOTInductorConstantMap*; + +// Creates an AOTInductor model container. The parameter num_models +// specifies the number of model instances that may be run concurrently for +// the same input model. +AOTIRuntimeError AOTInductorModelContainerCreate( + AOTInductorModelContainerHandle* container_handle, + size_t num_models, + bool is_cpu, + const char* cubin_dir); + +// Deletes the AOTInductor model container. +AOTIRuntimeError AOTInductorModelContainerDelete( + AOTInductorModelContainerHandle container_handle); + +// Runs the inference. 
+AOTIRuntimeError AOTInductorModelContainerRun( + AOTInductorModelContainerHandle container_handle, + AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + size_t num_inputs, + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + size_t num_outputs, + AOTInductorStreamHandle stream_handle, + AOTIProxyExecutorHandle proxy_executor_handle); + +// Retrieves the number of inputs for the model. +AOTIRuntimeError AOTInductorModelContainerGetNumInputs( + AOTInductorModelContainerHandle container_handle, + size_t* ret_num_inputs); + +// Retrieves the input name at the given index. +AOTIRuntimeError AOTInductorModelContainerGetInputName( + AOTInductorModelContainerHandle container_handle, + size_t input_idx, + const char** ret_input_names); + +// Retrieves the number of outputs for the model. +AOTIRuntimeError AOTInductorModelContainerGetNumOutputs( + AOTInductorModelContainerHandle container_handle, + size_t* ret_num_outputs); + +// Retrieves the output name at the given index. +AOTIRuntimeError AOTInductorModelContainerGetOutputName( + AOTInductorModelContainerHandle container_handle, + size_t output_idx, + const char** ret_output_names); + +// Creates an AOTInductorModel instance. This is a thin and light wrapper +// around the compiled model; it doesn't handle concurrency, queueing, device +// management, etc. Use this if bare-metal performance is needed and you are +// willing to handle other "management" aspects yourself. +// +// constant_map_handle is an opaque type to satisfy the C ABI. It should be a +// std::unordered_map*. 
+AOTIRuntimeError AOTInductorModelCreate( + AOTInductorModelHandle* model_handle, + AOTInductorConstantMapHandle constant_map_handle); + +// Run an AOTInductorModel (see AOTInductorModelCreate for when one should use +// this function versus AOTInductorModelContainerRun). +AOTIRuntimeError AOTInductorModelRun( + AOTInductorModelHandle model_handle, + AtenTensorHandle* input_handles, + AtenTensorHandle* output_handles); + +// Replace AOTInductorModel's constant map. Note it doesn't handle concurrency +// so be sure to handle ordering if AOTInductorModelRun is ran concurrently. +AOTIRuntimeError AOTInductorModelUpdateConstantsMap( + AOTInductorModelHandle model_handle, + AOTInductorConstantMapHandle constant_map_handle); + +// Delete an AOTInductorModel created by AOTInductorModelCreate. +AOTIRuntimeError AOTInductorModelDelete(AOTInductorModelHandle model_handle); + +AOTIRuntimeError AOTInductorModelGetNumOutputs( + AOTInductorModelHandle model_handle, + size_t* ret_num_outputs); + +AOTIRuntimeError AOTInductorModelContainerGetCallSpec( + AOTInductorModelContainerHandle container_handle, + const char** in_spec, + const char** out_spec); + +} // extern "C" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h new file mode 100644 index 0000000000000000000000000000000000000000..2860a8b251e6d6b264e152e97357fcde358c1bdc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h @@ -0,0 +1,525 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +// WARNING: Be careful when adding new includes here. This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. 
The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. +#include +#include + +#define AOTI_RUNTIME_CHECK(EXPR, MSG) \ + do { \ + bool ok = EXPR; \ + if (!ok) { \ + throw std::runtime_error(MSG); \ + } \ + } while (0) + +#if defined(__GNUC__) || defined(__clang__) +#define AOTI_NOINLINE __attribute__((noinline)) +#elif _MSC_VER +#define AOTI_NOINLINE __declspec(noinline) +#else +#define AOTI_NOINLINE +#endif + +// At codegen time, we write out a binary file called constants.bin. +// We then turn the raw binary to an object file that exposes this +// symbol and link it into the final .so. +// For information on the binary format, see `man objcopy`, under +// the "binary-architecture" flag: +// https://man7.org/linux/man-pages/man1/objcopy.1.html +// todo: use #embed in C++ 23 once available +extern const uint8_t _binary_constants_bin_start[]; +extern const uint8_t _binary_constants_bin_end[]; + +#define AOTI_CONST_GPU_ALIGNMENT 64 + +namespace { + +#ifdef USE_CUDA + +using CUDAPtr = std::unique_ptr>; + +CUDAPtr RAII_cudaMalloc(size_t num_bytes) { + void* data_ptr; + AOTI_RUNTIME_DEVICE_CHECK(cudaMalloc((void**)&data_ptr, num_bytes)); + auto deleter = [](void* ptr) { AOTI_RUNTIME_DEVICE_CHECK(cudaFree(ptr)); }; + return CUDAPtr(data_ptr, deleter); +} + +#endif // USE_CUDA + +} // anonymous namespace + +AOTI_NOINLINE static void throw_exception( + const char* call, + const char* file, + int64_t line) { + std::stringstream ss; + ss << call << " API call failed at " << file << ", line " << line; + throw std::runtime_error(ss.str()); +} + +#define AOTI_TORCH_ERROR_CODE_CHECK(call) \ + if ((call) != AOTI_TORCH_SUCCESS) { \ + throw_exception(#call, __FILE__, __LINE__); \ + } + +using DeleterFnPtr = void (*)(void*); + +namespace torch { +namespace aot_inductor { + +inline void delete_tensor_object(void* ptr) { + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_delete_tensor_object(reinterpret_cast(ptr))); +} + +// RAIIAtenTensorHandle steals the tensor 
objects created by the libtorch C ABI +class RAIIAtenTensorHandle { + public: + RAIIAtenTensorHandle() = delete; + RAIIAtenTensorHandle(const RAIIAtenTensorHandle& other) = delete; + RAIIAtenTensorHandle& operator=(const RAIIAtenTensorHandle& other) = delete; + + // Steal the ownership from another RAIIAtenTensorHandle using std::move + RAIIAtenTensorHandle(RAIIAtenTensorHandle&& other) = default; + RAIIAtenTensorHandle& operator=(RAIIAtenTensorHandle&& other) = default; + + // Steal the ownership from raw AtenTensorHandle + RAIIAtenTensorHandle(AtenTensorHandle handle) + : handle_(handle, delete_tensor_object) {} + + ~RAIIAtenTensorHandle() { + handle_.reset(); + } + + // Return a raw AtenTensorHandle to be used by aoti_torch functions + // Note: this function does NOT transfer the ownership of the handle + operator AtenTensorHandle() const { + return handle_.get(); + } + + AtenTensorHandle release() { + return handle_.release(); + } + + AtenTensorHandle get() { + return handle_.get(); + } + + void reset() { + handle_.reset(); + } + + int64_t size(int64_t d) { + int64_t size; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_size(handle_.get(), d, &size)); + return size; + } + + int64_t stride(int64_t d) { + int64_t stride; + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_get_stride(handle_.get(), d, &stride)); + return stride; + } + + int64_t storage_offset() { + int64_t storage_offset; + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_get_storage_offset(handle_.get(), &storage_offset)); + return storage_offset; + } + + private: + std::unique_ptr handle_; +}; + +using ConstantMap = std::unordered_map; + +// Steal the ownership from raw AtenTensorHandle to RAIIAtenTensorHandle +inline std::vector steal_from_raw_handles_to_raii_handles( + AtenTensorHandle* handles, + size_t size) { + std::vector result; + result.reserve(size); + for (size_t i = 0; i < size; i++) { + result.emplace_back(handles[i]); + handles[i] = nullptr; + } + return result; +} + +// Defines the base class for 
AOTInductorModel, which is generated by the +// AOTInductor cpp codegen. Since we do not need dynamic dispatch, we rely +// on curiously recurring template pattern (CRTP) to save some runtime +// v-table overhead. The generated AOTInductorModel is specialized with +// methods such as run_impl. +template +class AOTInductorModelBase { + public: + AOTInductorModelBase( + size_t num_inputs, + size_t num_outputs, + size_t num_constants, + std::optional cubin_dir) + : inputs_info_(num_inputs), + outputs_info_(num_outputs), + constants_info_(num_constants), + cubin_dir_(cubin_dir), + device_idx_(-1) { +#ifdef USE_CUDA + AOTI_RUNTIME_DEVICE_CHECK(cudaGetDevice(&device_idx_)); +#endif // USE_CUDA + } + + ~AOTInductorModelBase() { +#ifdef USE_CUDA + if (run_finished_) { + auto code = cudaEventDestroy(*run_finished_); + if (code != cudaSuccess) { + std::cerr << "Failed to destroy CUDA event in AOTInductor model: " + << cudaGetErrorString(code) << std::endl; + } + } +#endif // USE_CUDA + } + + AOTInductorModelBase(AOTInductorModelBase&&) = delete; + AOTInductorModelBase& operator=(AOTInductorModelBase&&) = delete; + AOTInductorModelBase(const AOTInductorModelBase&) = delete; + AOTInductorModelBase& operator=(const AOTInductorModelBase&) = delete; + + void run( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor) { +#ifdef USE_CUDA + if (!run_finished_) { + cudaEvent_t run_finished; + AOTI_RUNTIME_DEVICE_CHECK(cudaEventCreate(&run_finished)); + run_finished_.emplace(run_finished); + } + + auto* model = static_cast(this); + model->run_impl(input_handles, output_handles, stream, proxy_executor); + AOTI_RUNTIME_DEVICE_CHECK(cudaEventRecord(*run_finished_, stream)); +#else // 
!USE_CUDA + run_finished_ = false; + auto* model = static_cast(this); + model->run_impl(input_handles, output_handles, stream, proxy_executor); + run_finished_ = true; +#endif // USE_CUDA + } + + void load_constants(bool is_cpu) { + size_t num_constants = this->num_constants(); + constants_map_->reserve(num_constants); + + std::vector constants_internal_offset(num_constants); + if (!is_cpu) { + make_cuda_constant_blob(constants_internal_offset); + } + + size_t bytes_read = 0; + for (size_t i = 0; i < num_constants; i++) { + std::string name = this->constant_name(i); + size_t data_size = this->constant_data_size(i); + uint8_t* internal_ptr = (data_size != 0) + ? constant_ptr(constants_internal_offset[i], bytes_read, data_size) + : nullptr; + bytes_read += data_size; + + // Create at::Tensor from copied memory. + auto dtype = this->constant_type(i); + auto ndim = this->constant_ndim(i); + auto size = this->constant_shape(i); + auto stride = this->constant_stride(i); + auto offset = this->constant_offset(i); + + auto device_type = aoti_torch_device_type_cuda(); + if (is_cpu) { + device_type = aoti_torch_device_type_cpu(); + } + + AtenTensorHandle tensor_handle; + int device_idx = -1; // should be the same as was used for constant_blob_ +#ifdef USE_CUDA + AOTI_RUNTIME_DEVICE_CHECK(cudaGetDevice(&device_idx)); +#endif // USE_CUDA + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob( + internal_ptr, + ndim, + size, + stride, + offset, + dtype, + device_type, + device_idx, + &tensor_handle)); + constants_map_->emplace(std::move(name), tensor_handle); + } + this->update_constants_map(constants_map_); + } + +#ifdef USE_CUDA + CUDAPtr&& release_constant_blob() { + return std::move(constant_blob_); + } +#endif + + uint8_t* constant_ptr( + size_t constant_offset, + size_t bytes_read, + size_t data_size) { +#ifdef USE_CUDA + auto* constants_ptr = static_cast(constant_blob_.get()); + uint8_t* internal_ptr = constants_ptr + constant_offset; + // Copy data to GPU 
memory + // TODO: Handle shared storage case. + AOTI_RUNTIME_DEVICE_CHECK(cudaMemcpy( + internal_ptr, + _binary_constants_bin_start + bytes_read, + data_size, + cudaMemcpyHostToDevice)); + return internal_ptr; +#else // !USE_CUDA + // get pointer to constant which is packed in model during compile time. + return const_cast(_binary_constants_bin_start) + bytes_read; +#endif // USE_CUDA + } + + void make_cuda_constant_blob(std::vector& constants_internal_offset) { +#ifdef USE_CUDA + size_t num_constants = this->num_constants(); + // Compute required blob size with 64-alignment if on GPU. + size_t max_blob = 0; + for (size_t i = 0; i < num_constants; i++) { + size_t data_size = this->constant_data_size(i); + if (data_size % AOTI_CONST_GPU_ALIGNMENT) { + data_size = AOTI_CONST_GPU_ALIGNMENT + + (data_size / AOTI_CONST_GPU_ALIGNMENT) * AOTI_CONST_GPU_ALIGNMENT; + } + constants_internal_offset[i] = max_blob; + max_blob += data_size; + } + constant_blob_ = RAII_cudaMalloc(max_blob); +#endif // USE_CUDA + } + + size_t num_inputs() const { + return inputs_info_.size(); + } + + size_t num_outputs() const { + return outputs_info_.size(); + } + + size_t num_constants() const { + return constants_info_.size(); + } + + const char* input_name(int64_t idx) const { + return inputs_info_.at(idx).name; + } + + const char* output_name(int64_t idx) const { + return outputs_info_.at(idx).name; + } + + const char* constant_name(int64_t idx) const { + return constants_info_.at(idx).name; + } + + size_t constant_ndim(int64_t idx) { + return constants_info_.at(idx).shape.size(); + } + + const int64_t* constant_shape(int64_t idx) const { + return constants_info_.at(idx).shape.data(); + } + + const int64_t* constant_stride(int64_t idx) const { + return constants_info_.at(idx).stride.data(); + } + + int32_t constant_type(int64_t idx) const { + return constants_info_.at(idx).dtype; + } + + size_t constant_offset(int64_t idx) const { + return constants_info_.at(idx).offset; + } + + size_t 
constant_data_size(int64_t idx) const { + return constants_info_.at(idx).data_size; + } + + const char* get_in_spec() const { + return in_spec_.c_str(); + } + + const char* get_out_spec() const { + return out_spec_.c_str(); + } + + void update_constants_map(std::shared_ptr constants_map) { + constants_map_ = std::move(constants_map); + if (!constants_map_) { + return; + } + constants_.resize(constants_info_.size()); + int idx = 0; + for (const auto& info : constants_info_) { + const auto it = constants_map_->find(info.name); + if (it != constants_map_->end()) { + constants_[idx] = it->second; + } + idx++; + } + } + + /// Returns true if the model is complete. + bool is_finished() { +#ifdef USE_CUDA + if (!run_finished_) { + throw std::runtime_error{"Model CUDA event was not initialized"}; + } + + auto event_status = cudaEventQuery(*run_finished_); + if (event_status == cudaSuccess) { + return true; + } else if (event_status == cudaErrorNotReady) { + return false; + } + + throw std::runtime_error( + std::string("The model did not finish successfully. Error: ") + + cudaGetErrorString(cudaGetLastError())); +#else // !USE_CUDA + return run_finished_; +#endif // USE_CUDA + } + + /// Synchronizes completion event. + void wait_for_completion() { +#ifdef USE_CUDA + if (!run_finished_) { + throw std::runtime_error{"Model event was not initialized"}; + } + + AOTI_RUNTIME_DEVICE_CHECK(cudaEventSynchronize(*run_finished_)); +#endif // USE_CUDA + } + + protected: + struct ParamInfo { + const char* name = nullptr; + }; + + struct ConstInfo { + const char* name = nullptr; + std::vector shape; + std::vector stride; + int32_t dtype; + int64_t offset; + size_t data_size; + }; + + std::vector inputs_info_; + std::vector outputs_info_; + std::vector constants_info_; + std::string in_spec_; + std::string out_spec_; + + std::shared_ptr constants_map_; + std::vector constants_; + +#ifdef USE_CUDA + // Holds the blob storage for constants' at::Tensor for CUDA. 
+ CUDAPtr constant_blob_; +#endif // USE_CUDA + + // A directory with CUDA binary files, e.g. compiled kernels, etc. + const std::optional cubin_dir_; + + // Record if the model finishes an inference run so that its owning + // AOTModelContainer can re-use this instance. +#ifdef USE_CUDA + std::optional run_finished_; +#else // !USE_CUDA + bool run_finished_; +#endif + + // Generated model uses this device index to create CUDA guards. + int device_idx_; +}; + +// Codegen-ed classes can derive from this to keep pointers to loaded kernels. +class AOTInductorModelKernelsBase { + public: + virtual ~AOTInductorModelKernelsBase() = default; +}; + +class AOTInductorModel : public AOTInductorModelBase { + public: + AOTInductorModel(std::shared_ptr, std::optional); + + void run_impl( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor); + + static std::unique_ptr Create( + std::shared_ptr constants, + std::optional cubin_dir) { + return std::make_unique(std::move(constants), cubin_dir); + } + + private: + std::unique_ptr kernels_; +}; + +#ifdef USE_CUDA +class AOTICudaStreamGuard { + public: + AOTICudaStreamGuard(cudaStream_t stream, int32_t device_index) { + CUDAStreamGuardHandle ptr; + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_create_cuda_stream_guard(stream, device_index, &ptr)); + guard_ = + std::unique_ptr>(ptr, [](void* ptr) { + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_delete_cuda_stream_guard( + reinterpret_cast(ptr))); + }); + } + + private: + std::unique_ptr> guard_; +}; +#endif // USE_CUDA + +} // namespace aot_inductor +} // namespace torch